Dataset fields (type and observed size range):

repository_name              stringlengths    7 to 55
func_path_in_repository      stringlengths    4 to 223
func_name                    stringlengths    1 to 134
whole_func_string            stringlengths    75 to 104k
language                     stringclasses    1 value
func_code_string             stringlengths    75 to 104k
func_code_tokens             listlengths      19 to 28.4k
func_documentation_string    stringlengths    1 to 46.9k
func_documentation_tokens    listlengths      1 to 1.97k
split_name                   stringclasses    1 value
func_code_url                stringlengths    87 to 315
HarveyHunt/i3situation
i3situation/core/status.py
Status.handle_events
def handle_events(self): """ An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread. """ for event in sys.stdin: if event.startswith('['): continue name = json.loads(event.lstrip(','))['name'] for obj in self.loader.objects: if obj.output_options['name'] == name: obj.on_click(json.loads(event.lstrip(',')))
python
def handle_events(self): """ An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread. """ for event in sys.stdin: if event.startswith('['): continue name = json.loads(event.lstrip(','))['name'] for obj in self.loader.objects: if obj.output_options['name'] == name: obj.on_click(json.loads(event.lstrip(',')))
[ "def", "handle_events", "(", "self", ")", ":", "for", "event", "in", "sys", ".", "stdin", ":", "if", "event", ".", "startswith", "(", "'['", ")", ":", "continue", "name", "=", "json", ".", "loads", "(", "event", ".", "lstrip", "(", "','", ")", ")", "[", "'name'", "]", "for", "obj", "in", "self", ".", "loader", ".", "objects", ":", "if", "obj", ".", "output_options", "[", "'name'", "]", "==", "name", ":", "obj", ".", "on_click", "(", "json", ".", "loads", "(", "event", ".", "lstrip", "(", "','", ")", ")", ")" ]
An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread.
[ "An", "event", "handler", "that", "processes", "events", "from", "stdin", "and", "calls", "the", "on_click", "function", "of", "the", "respective", "object", ".", "This", "function", "is", "run", "in", "another", "thread", "so", "as", "to", "not", "stall", "the", "main", "thread", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/status.py#L141-L153
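For context, i3bar writes click events to the bar program's stdin as one endless JSON array: an opening '[' line, then one ',{...}' object per click, which is why the handler above skips the '[' line and strips a leading comma before parsing. A minimal standalone sketch of the same parse-and-dispatch step (ClockPlugin and the sample event line are made up for illustration):

    import json

    class ClockPlugin:
        output_options = {'name': 'clock'}

        def on_click(self, event):
            print('clock clicked with button', event['button'])

    plugins = [ClockPlugin()]

    # One click-event line, as i3bar would emit it after the initial '[' line.
    line = ',{"name": "clock", "button": 1, "x": 10, "y": 5}'

    if not line.startswith('['):
        event = json.loads(line.lstrip(','))          # drop the array separator
        for obj in plugins:
            if obj.output_options['name'] == event['name']:
                obj.on_click(event)                   # dispatch to the matching plugin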
Alir3z4/django-databrowse
django_databrowse/plugins/fieldchoices.py
FieldChoicePlugin.field_dict
def field_dict(self, model): """ Helper function that returns a dictionary of all fields in the given model. If self.field_filter is set, it only includes the fields that match the filter. """ if self.field_filter: return dict( [(f.name, f) for f in model._meta.fields if self.field_filter(f)] ) else: return dict( [(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, (models.AutoField, models.TextField))] )
python
def field_dict(self, model): """ Helper function that returns a dictionary of all fields in the given model. If self.field_filter is set, it only includes the fields that match the filter. """ if self.field_filter: return dict( [(f.name, f) for f in model._meta.fields if self.field_filter(f)] ) else: return dict( [(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, (models.AutoField, models.TextField))] )
[ "def", "field_dict", "(", "self", ",", "model", ")", ":", "if", "self", ".", "field_filter", ":", "return", "dict", "(", "[", "(", "f", ".", "name", ",", "f", ")", "for", "f", "in", "model", ".", "_meta", ".", "fields", "if", "self", ".", "field_filter", "(", "f", ")", "]", ")", "else", ":", "return", "dict", "(", "[", "(", "f", ".", "name", ",", "f", ")", "for", "f", "in", "model", ".", "_meta", ".", "fields", "if", "not", "f", ".", "rel", "and", "not", "f", ".", "primary_key", "and", "not", "f", ".", "unique", "and", "not", "isinstance", "(", "f", ",", "(", "models", ".", "AutoField", ",", "models", ".", "TextField", ")", ")", "]", ")" ]
Helper function that returns a dictionary of all fields in the given model. If self.field_filter is set, it only includes the fields that match the filter.
[ "Helper", "function", "that", "returns", "a", "dictionary", "of", "all", "fields", "in", "the", "given", "model", ".", "If", "self", ".", "field_filter", "is", "set", "it", "only", "includes", "the", "fields", "that", "match", "the", "filter", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/plugins/fieldchoices.py#L25-L43
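The heart of field_dict is a name-to-field mapping built with an optional predicate; a framework-free sketch of the same idea (the namedtuple stands in for Django's field objects, which the real method reads from model._meta.fields):

    from collections import namedtuple

    # Toy stand-in for Django field objects.
    Field = namedtuple('Field', 'name primary_key unique')

    fields = [Field('id', True, True),
              Field('status', False, False),
              Field('slug', False, True)]

    def field_dict(fields, field_filter=None):
        if field_filter:
            return {f.name: f for f in fields if field_filter(f)}
        # Default behaviour mirrors the plugin: skip primary keys and unique columns.
        return {f.name: f for f in fields if not f.primary_key and not f.unique}

    print(list(field_dict(fields)))                                    # ['status']
    print(list(field_dict(fields, lambda f: f.name.startswith('s'))))  # ['status', 'slug']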
HarveyHunt/i3situation
i3situation/core/plugin_manager.py
Thread.run
def run(self): """ Calls the main function of a plugin and mutates the output dict with its return value. Provides an easy way to change the output whilst not needing to constantly poll a queue in another thread and allowing plugin's to manage their own intervals. """ self.running = True while self.running: ret = self.func() self.output_dict[ret['name']] = ret time.sleep(self.interval) return
python
def run(self): """ Calls the main function of a plugin and mutates the output dict with its return value. Provides an easy way to change the output whilst not needing to constantly poll a queue in another thread and allowing plugin's to manage their own intervals. """ self.running = True while self.running: ret = self.func() self.output_dict[ret['name']] = ret time.sleep(self.interval) return
[ "def", "run", "(", "self", ")", ":", "self", ".", "running", "=", "True", "while", "self", ".", "running", ":", "ret", "=", "self", ".", "func", "(", ")", "self", ".", "output_dict", "[", "ret", "[", "'name'", "]", "]", "=", "ret", "time", ".", "sleep", "(", "self", ".", "interval", ")", "return" ]
Calls the main function of a plugin and mutates the output dict with its return value. Provides an easy way to change the output whilst not needing to constantly poll a queue in another thread and allowing plugin's to manage their own intervals.
[ "Calls", "the", "main", "function", "of", "a", "plugin", "and", "mutates", "the", "output", "dict", "with", "its", "return", "value", ".", "Provides", "an", "easy", "way", "to", "change", "the", "output", "whilst", "not", "needing", "to", "constantly", "poll", "a", "queue", "in", "another", "thread", "and", "allowing", "plugin", "s", "to", "manage", "their", "own", "intervals", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L43-L55
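A self-contained sketch of the same worker pattern, using threading.Thread directly: the callable's result is keyed by its 'name' into a shared dict, and the loop exits once running is cleared (the clock callable is illustrative):

    import threading
    import time

    class PollingThread(threading.Thread):
        def __init__(self, func, interval, output_dict):
            super().__init__(daemon=True)
            self.func = func
            self.interval = interval
            self.output_dict = output_dict
            self.running = False

        def run(self):
            self.running = True
            while self.running:
                ret = self.func()
                self.output_dict[ret['name']] = ret   # plugins key their output by name
                time.sleep(self.interval)

    shared = {}
    clock = lambda: {'name': 'clock', 'full_text': time.ctime()}
    t = PollingThread(clock, interval=1, output_dict=shared)
    t.start()
    time.sleep(1.5)
    t.running = False        # the loop stops after its current sleep
    print(shared['clock'])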
HarveyHunt/i3situation
i3situation/core/plugin_manager.py
ThreadManager.add_thread
def add_thread(self, func, interval): """ Creates a thread, starts it and then adds it to the thread pool. Func: Same as in the Thread class. Interval: Same as in the Thread class. """ t = Thread(func, interval, self.output_dict) t.start() self._thread_pool.append(t)
python
def add_thread(self, func, interval): """ Creates a thread, starts it and then adds it to the thread pool. Func: Same as in the Thread class. Interval: Same as in the Thread class. """ t = Thread(func, interval, self.output_dict) t.start() self._thread_pool.append(t)
[ "def", "add_thread", "(", "self", ",", "func", ",", "interval", ")", ":", "t", "=", "Thread", "(", "func", ",", "interval", ",", "self", ".", "output_dict", ")", "t", ".", "start", "(", ")", "self", ".", "_thread_pool", ".", "append", "(", "t", ")" ]
Creates a thread, starts it and then adds it to the thread pool. Func: Same as in the Thread class. Interval: Same as in the Thread class.
[ "Creates", "a", "thread", "starts", "it", "and", "then", "adds", "it", "to", "the", "thread", "pool", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L84-L93
HarveyHunt/i3situation
i3situation/core/plugin_manager.py
PluginLoader._compile_files
def _compile_files(self): """ Compiles python plugin files in order to be processed by the loader. It compiles the plugins if they have been updated or haven't yet been compiled. """ for f in glob.glob(os.path.join(self.dir_path, '*.py')): # Check for compiled Python files that aren't in the __pycache__. if not os.path.isfile(os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a new plugin has been added.') return # Recompile if there are newer plugins. elif os.path.getmtime(os.path.join(self.dir_path, f)) > os.path.getmtime( os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a plugin has been changed.') return
python
def _compile_files(self): """ Compiles python plugin files in order to be processed by the loader. It compiles the plugins if they have been updated or haven't yet been compiled. """ for f in glob.glob(os.path.join(self.dir_path, '*.py')): # Check for compiled Python files that aren't in the __pycache__. if not os.path.isfile(os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a new plugin has been added.') return # Recompile if there are newer plugins. elif os.path.getmtime(os.path.join(self.dir_path, f)) > os.path.getmtime( os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a plugin has been changed.') return
[ "def", "_compile_files", "(", "self", ")", ":", "for", "f", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir_path", ",", "'*.py'", ")", ")", ":", "# Check for compiled Python files that aren't in the __pycache__.", "if", "not", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir_path", ",", "f", "+", "'c'", ")", ")", ":", "compileall", ".", "compile_dir", "(", "self", ".", "dir_path", ",", "quiet", "=", "True", ")", "logging", ".", "debug", "(", "'Compiled plugins as a new plugin has been added.'", ")", "return", "# Recompile if there are newer plugins.", "elif", "os", ".", "path", ".", "getmtime", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir_path", ",", "f", ")", ")", ">", "os", ".", "path", ".", "getmtime", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir_path", ",", "f", "+", "'c'", ")", ")", ":", "compileall", ".", "compile_dir", "(", "self", ".", "dir_path", ",", "quiet", "=", "True", ")", "logging", ".", "debug", "(", "'Compiled plugins as a plugin has been changed.'", ")", "return" ]
Compiles python plugin files in order to be processed by the loader. It compiles the plugins if they have been updated or haven't yet been compiled.
[ "Compiles", "python", "plugin", "files", "in", "order", "to", "be", "processed", "by", "the", "loader", ".", "It", "compiles", "the", "plugins", "if", "they", "have", "been", "updated", "or", "haven", "t", "yet", "been", "compiled", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L125-L142
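The staleness test boils down to comparing a source file's mtime against its compiled counterpart; a minimal sketch under the same legacy layout assumption (a foo.pyc sitting beside foo.py, which compile_dir only writes with legacy=True on Python 3; plugin_dir is a placeholder path):

    import compileall
    import glob
    import os

    def needs_recompile(dir_path):
        """True if any plugin .py has no neighbouring .pyc or is newer than it."""
        for src in glob.glob(os.path.join(dir_path, '*.py')):
            cached = src + 'c'                     # legacy layout: foo.pyc beside foo.py
            if not os.path.isfile(cached):
                return True
            if os.path.getmtime(src) > os.path.getmtime(cached):
                return True
        return False

    plugin_dir = '/tmp/plugins'                    # placeholder plugin directory
    if os.path.isdir(plugin_dir) and needs_recompile(plugin_dir):
        compileall.compile_dir(plugin_dir, quiet=True, legacy=True)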
HarveyHunt/i3situation
i3situation/core/plugin_manager.py
PluginLoader._load_compiled
def _load_compiled(self, file_path): """ Accepts a path to a compiled plugin and returns a module object. file_path: A string that represents a complete file path to a compiled plugin. """ name = os.path.splitext(os.path.split(file_path)[-1])[0] plugin_directory = os.sep.join(os.path.split(file_path)[0:-1]) compiled_directory = os.path.join(plugin_directory, '__pycache__') # Use glob to autocomplete the filename. compiled_file = glob.glob(os.path.join(compiled_directory, (name + '.*')))[0] plugin = imp.load_compiled(name, compiled_file) return plugin
python
def _load_compiled(self, file_path): """ Accepts a path to a compiled plugin and returns a module object. file_path: A string that represents a complete file path to a compiled plugin. """ name = os.path.splitext(os.path.split(file_path)[-1])[0] plugin_directory = os.sep.join(os.path.split(file_path)[0:-1]) compiled_directory = os.path.join(plugin_directory, '__pycache__') # Use glob to autocomplete the filename. compiled_file = glob.glob(os.path.join(compiled_directory, (name + '.*')))[0] plugin = imp.load_compiled(name, compiled_file) return plugin
[ "def", "_load_compiled", "(", "self", ",", "file_path", ")", ":", "name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "file_path", ")", "[", "-", "1", "]", ")", "[", "0", "]", "plugin_directory", "=", "os", ".", "sep", ".", "join", "(", "os", ".", "path", ".", "split", "(", "file_path", ")", "[", "0", ":", "-", "1", "]", ")", "compiled_directory", "=", "os", ".", "path", ".", "join", "(", "plugin_directory", ",", "'__pycache__'", ")", "# Use glob to autocomplete the filename.", "compiled_file", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "compiled_directory", ",", "(", "name", "+", "'.*'", ")", ")", ")", "[", "0", "]", "plugin", "=", "imp", ".", "load_compiled", "(", "name", ",", "compiled_file", ")", "return", "plugin" ]
Accepts a path to a compiled plugin and returns a module object. file_path: A string that represents a complete file path to a compiled plugin.
[ "Accepts", "a", "path", "to", "a", "compiled", "plugin", "and", "returns", "a", "module", "object", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L144-L157
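The imp module used here is deprecated and removed in Python 3.12; a hedged equivalent that loads a plugin's cached bytecode with importlib might look like this (the interpreter tag is filled in by glob, as above):

    import glob
    import os
    from importlib.machinery import SourcelessFileLoader
    from importlib.util import module_from_spec, spec_from_loader

    def load_compiled(file_path):
        """Load the __pycache__ bytecode that corresponds to a plugin source path."""
        name = os.path.splitext(os.path.basename(file_path))[0]
        cache_dir = os.path.join(os.path.dirname(file_path), '__pycache__')
        # glob fills in the interpreter tag, e.g. myplugin.cpython-312.pyc.
        compiled_file = glob.glob(os.path.join(cache_dir, name + '.*'))[0]
        loader = SourcelessFileLoader(name, compiled_file)
        spec = spec_from_loader(name, loader)
        module = module_from_spec(spec)
        loader.exec_module(module)
        return module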
HarveyHunt/i3situation
i3situation/core/plugin_manager.py
PluginLoader.load_objects
def load_objects(self): """ Matches the plugins that have been specified in the config file with the available plugins. Returns instantiated objects based upon the classes defined in the plugins. """ objects = [] for settings in self._config: if settings['plugin'] in self.plugins: module = self.plugins[settings['plugin']] # Trusts that the only item in __all__ is the name of the # plugin class. plugin_class = getattr(module, module.__all__) objects.append(plugin_class(settings)) logging.debug('Loaded a plugin object based upon {0}'.format( settings['plugin'])) else: logging.critical('Missing plugin {0} was not found in {1}'.format( settings['plugin'], self.dir_path)) raise MissingPlugin('The plugin {0} was not found in {1}'.format( settings['plugin'], self.dir_path)) return objects
python
def load_objects(self): """ Matches the plugins that have been specified in the config file with the available plugins. Returns instantiated objects based upon the classes defined in the plugins. """ objects = [] for settings in self._config: if settings['plugin'] in self.plugins: module = self.plugins[settings['plugin']] # Trusts that the only item in __all__ is the name of the # plugin class. plugin_class = getattr(module, module.__all__) objects.append(plugin_class(settings)) logging.debug('Loaded a plugin object based upon {0}'.format( settings['plugin'])) else: logging.critical('Missing plugin {0} was not found in {1}'.format( settings['plugin'], self.dir_path)) raise MissingPlugin('The plugin {0} was not found in {1}'.format( settings['plugin'], self.dir_path)) return objects
[ "def", "load_objects", "(", "self", ")", ":", "objects", "=", "[", "]", "for", "settings", "in", "self", ".", "_config", ":", "if", "settings", "[", "'plugin'", "]", "in", "self", ".", "plugins", ":", "module", "=", "self", ".", "plugins", "[", "settings", "[", "'plugin'", "]", "]", "# Trusts that the only item in __all__ is the name of the", "# plugin class.", "plugin_class", "=", "getattr", "(", "module", ",", "module", ".", "__all__", ")", "objects", ".", "append", "(", "plugin_class", "(", "settings", ")", ")", "logging", ".", "debug", "(", "'Loaded a plugin object based upon {0}'", ".", "format", "(", "settings", "[", "'plugin'", "]", ")", ")", "else", ":", "logging", ".", "critical", "(", "'Missing plugin {0} was not found in {1}'", ".", "format", "(", "settings", "[", "'plugin'", "]", ",", "self", ".", "dir_path", ")", ")", "raise", "MissingPlugin", "(", "'The plugin {0} was not found in {1}'", ".", "format", "(", "settings", "[", "'plugin'", "]", ",", "self", ".", "dir_path", ")", ")", "return", "objects" ]
Matches the plugins that have been specified in the config file with the available plugins. Returns instantiated objects based upon the classes defined in the plugins.
[ "Matches", "the", "plugins", "that", "have", "been", "specified", "in", "the", "config", "file", "with", "the", "available", "plugins", ".", "Returns", "instantiated", "objects", "based", "upon", "the", "classes", "defined", "in", "the", "plugins", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L159-L180
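This lookup relies on each plugin module setting __all__ to the plugin class name itself (a single string rather than the usual list), since getattr(module, module.__all__) would fail on a list. A standalone sketch with a throwaway module object (ClockPlugin and the config entry are invented):

    import types

    # Fake plugin module; the real loader gets these from compiled plugin files.
    clock_module = types.ModuleType('clock')
    clock_module.__all__ = 'ClockPlugin'      # a plain string, as the loader expects

    class ClockPlugin:
        def __init__(self, settings):
            self.settings = settings

    clock_module.ClockPlugin = ClockPlugin

    plugins = {'clock': clock_module}
    config = [{'plugin': 'clock', 'interval': 5}]

    objects = []
    for settings in config:
        module = plugins[settings['plugin']]
        plugin_class = getattr(module, module.__all__)
        objects.append(plugin_class(settings))

    print(objects[0].settings)                # {'plugin': 'clock', 'interval': 5}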
HarveyHunt/i3situation
i3situation/core/plugin_manager.py
PluginLoader.refresh_files
def refresh_files(self): """ Discovers the available plugins and turns each into a module object. This is a seperate function to allow plugins to be updated dynamically by other parts of the application. """ plugins = {} _plugin_files = glob.glob(os.path.join(self.dir_path, '[!_]*.pyc')) for f in glob.glob(os.path.join(self.dir_path, '[!_]*.py')): if not any(os.path.splitext(f)[0] == os.path.splitext(x)[0] for x in _plugin_files): logging.debug('Adding plugin {0}'.format(f)) _plugin_files.append(f) for f in _plugin_files: plugin = self._load_compiled(f) plugins[plugin.__name__] = plugin logging.debug('Loaded module object for plugin: {0}'.format(f)) return plugins
python
def refresh_files(self): """ Discovers the available plugins and turns each into a module object. This is a seperate function to allow plugins to be updated dynamically by other parts of the application. """ plugins = {} _plugin_files = glob.glob(os.path.join(self.dir_path, '[!_]*.pyc')) for f in glob.glob(os.path.join(self.dir_path, '[!_]*.py')): if not any(os.path.splitext(f)[0] == os.path.splitext(x)[0] for x in _plugin_files): logging.debug('Adding plugin {0}'.format(f)) _plugin_files.append(f) for f in _plugin_files: plugin = self._load_compiled(f) plugins[plugin.__name__] = plugin logging.debug('Loaded module object for plugin: {0}'.format(f)) return plugins
[ "def", "refresh_files", "(", "self", ")", ":", "plugins", "=", "{", "}", "_plugin_files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir_path", ",", "'[!_]*.pyc'", ")", ")", "for", "f", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir_path", ",", "'[!_]*.py'", ")", ")", ":", "if", "not", "any", "(", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "0", "]", "==", "os", ".", "path", ".", "splitext", "(", "x", ")", "[", "0", "]", "for", "x", "in", "_plugin_files", ")", ":", "logging", ".", "debug", "(", "'Adding plugin {0}'", ".", "format", "(", "f", ")", ")", "_plugin_files", ".", "append", "(", "f", ")", "for", "f", "in", "_plugin_files", ":", "plugin", "=", "self", ".", "_load_compiled", "(", "f", ")", "plugins", "[", "plugin", ".", "__name__", "]", "=", "plugin", "logging", ".", "debug", "(", "'Loaded module object for plugin: {0}'", ".", "format", "(", "f", ")", ")", "return", "plugins" ]
Discovers the available plugins and turns each into a module object. This is a seperate function to allow plugins to be updated dynamically by other parts of the application.
[ "Discovers", "the", "available", "plugins", "and", "turns", "each", "into", "a", "module", "object", ".", "This", "is", "a", "seperate", "function", "to", "allow", "plugins", "to", "be", "updated", "dynamically", "by", "other", "parts", "of", "the", "application", "." ]
train
https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L182-L199
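The discovery step prefers a plugin's compiled file but falls back to the source when no compiled version exists; a condensed sketch of that merge (the directory layout is assumed):

    import glob
    import os

    def candidate_plugin_files(dir_path):
        """Collect .pyc plugins, then add any .py whose stem has no .pyc yet."""
        files = glob.glob(os.path.join(dir_path, '[!_]*.pyc'))   # skip _private modules
        stems = {os.path.splitext(f)[0] for f in files}
        for src in glob.glob(os.path.join(dir_path, '[!_]*.py')):
            if os.path.splitext(src)[0] not in stems:
                files.append(src)
        return files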
Alir3z4/django-databrowse
django_databrowse/sites.py
ModelDatabrowse.root
def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'objects/3'. """ # Delegate to the appropriate method, based on the URL. if url is None: return self.main_view(request) try: plugin_name, rest_of_url = url.split('/', 1) except ValueError: # need more than 1 value to unpack plugin_name, rest_of_url = url, None try: plugin = self.plugins[plugin_name] except KeyError: raise http.Http404('A plugin with the requested name ' 'does not exist.') return plugin.model_view(request, self, rest_of_url)
python
def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'objects/3'. """ # Delegate to the appropriate method, based on the URL. if url is None: return self.main_view(request) try: plugin_name, rest_of_url = url.split('/', 1) except ValueError: # need more than 1 value to unpack plugin_name, rest_of_url = url, None try: plugin = self.plugins[plugin_name] except KeyError: raise http.Http404('A plugin with the requested name ' 'does not exist.') return plugin.model_view(request, self, rest_of_url)
[ "def", "root", "(", "self", ",", "request", ",", "url", ")", ":", "# Delegate to the appropriate method, based on the URL.", "if", "url", "is", "None", ":", "return", "self", ".", "main_view", "(", "request", ")", "try", ":", "plugin_name", ",", "rest_of_url", "=", "url", ".", "split", "(", "'/'", ",", "1", ")", "except", "ValueError", ":", "# need more than 1 value to unpack", "plugin_name", ",", "rest_of_url", "=", "url", ",", "None", "try", ":", "plugin", "=", "self", ".", "plugins", "[", "plugin_name", "]", "except", "KeyError", ":", "raise", "http", ".", "Http404", "(", "'A plugin with the requested name '", "'does not exist.'", ")", "return", "plugin", ".", "model_view", "(", "request", ",", "self", ",", "rest_of_url", ")" ]
Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'objects/3'.
[ "Handles", "main", "URL", "routing", "for", "the", "databrowse", "app", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L55-L73
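The routing hinge is the two-way split with a ValueError fallback when the URL has no slash; isolated, it behaves like this:

    def split_plugin_url(url):
        """'objects/3' -> ('objects', '3'); 'objects' -> ('objects', None)."""
        try:
            plugin_name, rest_of_url = url.split('/', 1)
        except ValueError:              # no '/': only one value to unpack
            plugin_name, rest_of_url = url, None
        return plugin_name, rest_of_url

    print(split_plugin_url('objects/3'))     # ('objects', '3')
    print(split_plugin_url('calendars'))     # ('calendars', None)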
Alir3z4/django-databrowse
django_databrowse/sites.py
DatabrowseSite.register
def register(self, *model_list, **options): """ Registers the given model(s) with the given databrowse site. The model(s) should be Model classes, not instances. If a databrowse class isn't given, it will use DefaultModelDatabrowse (the default databrowse options). If a model is already registered, this will raise AlreadyRegistered. """ databrowse_class = options.pop('databrowse_class', DefaultModelDatabrowse) for model in model_list: if model in self.registry: raise AlreadyRegistered('The model %s is already registered' % model.__name__) self.registry[model] = databrowse_class
python
def register(self, *model_list, **options): """ Registers the given model(s) with the given databrowse site. The model(s) should be Model classes, not instances. If a databrowse class isn't given, it will use DefaultModelDatabrowse (the default databrowse options). If a model is already registered, this will raise AlreadyRegistered. """ databrowse_class = options.pop('databrowse_class', DefaultModelDatabrowse) for model in model_list: if model in self.registry: raise AlreadyRegistered('The model %s is already registered' % model.__name__) self.registry[model] = databrowse_class
[ "def", "register", "(", "self", ",", "*", "model_list", ",", "*", "*", "options", ")", ":", "databrowse_class", "=", "options", ".", "pop", "(", "'databrowse_class'", ",", "DefaultModelDatabrowse", ")", "for", "model", "in", "model_list", ":", "if", "model", "in", "self", ".", "registry", ":", "raise", "AlreadyRegistered", "(", "'The model %s is already registered'", "%", "model", ".", "__name__", ")", "self", ".", "registry", "[", "model", "]", "=", "databrowse_class" ]
Registers the given model(s) with the given databrowse site. The model(s) should be Model classes, not instances. If a databrowse class isn't given, it will use DefaultModelDatabrowse (the default databrowse options). If a model is already registered, this will raise AlreadyRegistered.
[ "Registers", "the", "given", "model", "(", "s", ")", "with", "the", "given", "databrowse", "site", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L119-L136
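A toy, framework-free mimic of the registration bookkeeping, just to show the duplicate check (TinySite, Author and Book are invented stand-ins; real usage registers Django model classes against a databrowse site):

    class AlreadyRegistered(Exception):
        pass

    class TinySite:
        def __init__(self):
            self.registry = {}

        def register(self, *model_list, **options):
            handler = options.pop('databrowse_class', 'default')
            for model in model_list:
                if model in self.registry:
                    raise AlreadyRegistered('The model %s is already registered'
                                            % model.__name__)
                self.registry[model] = handler

    class Author: pass
    class Book: pass

    site = TinySite()
    site.register(Author, Book)
    try:
        site.register(Author)
    except AlreadyRegistered as exc:
        print(exc)            # The model Author is already registered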
Alir3z4/django-databrowse
django_databrowse/sites.py
DatabrowseSite.unregister
def unregister(self, *model_list): """ Unregisters the given model(s). If a model isn't already registered, this will raise NotRegistered. """ for model in model_list: if model not in self.registry: raise NotRegistered('The model %s is not registered' % model.__name__) del self.registry[model]
python
def unregister(self, *model_list): """ Unregisters the given model(s). If a model isn't already registered, this will raise NotRegistered. """ for model in model_list: if model not in self.registry: raise NotRegistered('The model %s is not registered' % model.__name__) del self.registry[model]
[ "def", "unregister", "(", "self", ",", "*", "model_list", ")", ":", "for", "model", "in", "model_list", ":", "if", "model", "not", "in", "self", ".", "registry", ":", "raise", "NotRegistered", "(", "'The model %s is not registered'", "%", "model", ".", "__name__", ")", "del", "self", ".", "registry", "[", "model", "]" ]
Unregisters the given model(s). If a model isn't already registered, this will raise NotRegistered.
[ "Unregisters", "the", "given", "model", "(", "s", ")", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L138-L148
Alir3z4/django-databrowse
django_databrowse/sites.py
DatabrowseSite.root
def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'comments/comment/'. """ self.root_url = request.path[:len(request.path) - len(url)] url = url.rstrip('/') # Trim trailing slash, if it exists. if url == '': return self.index(request) elif '/' in url: return self.model_page(request, *url.split('/', 2)) raise http.Http404('The requested databrowse page does not exist.')
python
def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'comments/comment/'. """ self.root_url = request.path[:len(request.path) - len(url)] url = url.rstrip('/') # Trim trailing slash, if it exists. if url == '': return self.index(request) elif '/' in url: return self.model_page(request, *url.split('/', 2)) raise http.Http404('The requested databrowse page does not exist.')
[ "def", "root", "(", "self", ",", "request", ",", "url", ")", ":", "self", ".", "root_url", "=", "request", ".", "path", "[", ":", "len", "(", "request", ".", "path", ")", "-", "len", "(", "url", ")", "]", "url", "=", "url", ".", "rstrip", "(", "'/'", ")", "# Trim trailing slash, if it exists.", "if", "url", "==", "''", ":", "return", "self", ".", "index", "(", "request", ")", "elif", "'/'", "in", "url", ":", "return", "self", ".", "model_page", "(", "request", ",", "*", "url", ".", "split", "(", "'/'", ",", "2", ")", ")", "raise", "http", ".", "Http404", "(", "'The requested databrowse page does not exist.'", ")" ]
Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'comments/comment/'.
[ "Handles", "main", "URL", "routing", "for", "the", "databrowse", "app", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L150-L164
Alir3z4/django-databrowse
django_databrowse/sites.py
DatabrowseSite.model_page
def model_page(self, request, app_label, model_name, rest_of_url=None): """ Handles the model-specific functionality of the databrowse site, delegating to the appropriate ModelDatabrowse class. """ try: model = get_model(app_label, model_name) except LookupError: model = None if model is None: raise http.Http404("App %r, model %r, not found." % (app_label, model_name)) try: databrowse_class = self.registry[model] except KeyError: raise http.Http404("This model exists but has not been registered " "with databrowse.") return databrowse_class(model, self).root(request, rest_of_url)
python
def model_page(self, request, app_label, model_name, rest_of_url=None): """ Handles the model-specific functionality of the databrowse site, delegating to the appropriate ModelDatabrowse class. """ try: model = get_model(app_label, model_name) except LookupError: model = None if model is None: raise http.Http404("App %r, model %r, not found." % (app_label, model_name)) try: databrowse_class = self.registry[model] except KeyError: raise http.Http404("This model exists but has not been registered " "with databrowse.") return databrowse_class(model, self).root(request, rest_of_url)
[ "def", "model_page", "(", "self", ",", "request", ",", "app_label", ",", "model_name", ",", "rest_of_url", "=", "None", ")", ":", "try", ":", "model", "=", "get_model", "(", "app_label", ",", "model_name", ")", "except", "LookupError", ":", "model", "=", "None", "if", "model", "is", "None", ":", "raise", "http", ".", "Http404", "(", "\"App %r, model %r, not found.\"", "%", "(", "app_label", ",", "model_name", ")", ")", "try", ":", "databrowse_class", "=", "self", ".", "registry", "[", "model", "]", "except", "KeyError", ":", "raise", "http", ".", "Http404", "(", "\"This model exists but has not been registered \"", "\"with databrowse.\"", ")", "return", "databrowse_class", "(", "model", ",", "self", ")", ".", "root", "(", "request", ",", "rest_of_url", ")" ]
Handles the model-specific functionality of the databrowse site, delegating to the appropriate ModelDatabrowse class.
[ "Handles", "the", "model", "-", "specific", "functionality", "of", "the", "databrowse", "site", "delegating<to", "the", "appropriate", "ModelDatabrowse", "class", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/sites.py#L173-L191
Alir3z4/django-databrowse
django_databrowse/datastructures.py
EasyInstanceField.values
def values(self): """ Returns a list of values for this field for this instance. It's a list so we can accomodate many-to-many fields. """ # This import is deliberately inside the function because it causes # some settings to be imported, and we don't want to do that at the # module level. if self.field.rel: if isinstance(self.field.rel, models.ManyToOneRel): objs = getattr(self.instance.instance, self.field.name) elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel return list(getattr(self.instance.instance, self.field.name).all()) elif self.field.choices: objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE) elif isinstance(self.field, models.DateField) or \ isinstance(self.field, models.TimeField): if self.raw_value: if isinstance(self.field, models.DateTimeField): objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT')) elif isinstance(self.field, models.TimeField): objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT')) else: objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT')) else: objs = EMPTY_VALUE elif isinstance(self.field, models.BooleanField) or \ isinstance(self.field, models.NullBooleanField): objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value] else: objs = self.raw_value return [objs]
python
def values(self): """ Returns a list of values for this field for this instance. It's a list so we can accomodate many-to-many fields. """ # This import is deliberately inside the function because it causes # some settings to be imported, and we don't want to do that at the # module level. if self.field.rel: if isinstance(self.field.rel, models.ManyToOneRel): objs = getattr(self.instance.instance, self.field.name) elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel return list(getattr(self.instance.instance, self.field.name).all()) elif self.field.choices: objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE) elif isinstance(self.field, models.DateField) or \ isinstance(self.field, models.TimeField): if self.raw_value: if isinstance(self.field, models.DateTimeField): objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT')) elif isinstance(self.field, models.TimeField): objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT')) else: objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT')) else: objs = EMPTY_VALUE elif isinstance(self.field, models.BooleanField) or \ isinstance(self.field, models.NullBooleanField): objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value] else: objs = self.raw_value return [objs]
[ "def", "values", "(", "self", ")", ":", "# This import is deliberately inside the function because it causes", "# some settings to be imported, and we don't want to do that at the", "# module level.", "if", "self", ".", "field", ".", "rel", ":", "if", "isinstance", "(", "self", ".", "field", ".", "rel", ",", "models", ".", "ManyToOneRel", ")", ":", "objs", "=", "getattr", "(", "self", ".", "instance", ".", "instance", ",", "self", ".", "field", ".", "name", ")", "elif", "isinstance", "(", "self", ".", "field", ".", "rel", ",", "models", ".", "ManyToManyRel", ")", ":", "# ManyToManyRel", "return", "list", "(", "getattr", "(", "self", ".", "instance", ".", "instance", ",", "self", ".", "field", ".", "name", ")", ".", "all", "(", ")", ")", "elif", "self", ".", "field", ".", "choices", ":", "objs", "=", "dict", "(", "self", ".", "field", ".", "choices", ")", ".", "get", "(", "self", ".", "raw_value", ",", "EMPTY_VALUE", ")", "elif", "isinstance", "(", "self", ".", "field", ",", "models", ".", "DateField", ")", "or", "isinstance", "(", "self", ".", "field", ",", "models", ".", "TimeField", ")", ":", "if", "self", ".", "raw_value", ":", "if", "isinstance", "(", "self", ".", "field", ",", "models", ".", "DateTimeField", ")", ":", "objs", "=", "capfirst", "(", "formats", ".", "date_format", "(", "self", ".", "raw_value", ",", "'DATETIME_FORMAT'", ")", ")", "elif", "isinstance", "(", "self", ".", "field", ",", "models", ".", "TimeField", ")", ":", "objs", "=", "capfirst", "(", "formats", ".", "time_format", "(", "self", ".", "raw_value", ",", "'TIME_FORMAT'", ")", ")", "else", ":", "objs", "=", "capfirst", "(", "formats", ".", "date_format", "(", "self", ".", "raw_value", ",", "'DATE_FORMAT'", ")", ")", "else", ":", "objs", "=", "EMPTY_VALUE", "elif", "isinstance", "(", "self", ".", "field", ",", "models", ".", "BooleanField", ")", "or", "isinstance", "(", "self", ".", "field", ",", "models", ".", "NullBooleanField", ")", ":", "objs", "=", "{", "True", ":", "'Yes'", ",", "False", ":", "'No'", ",", "None", ":", "'Unknown'", "}", "[", "self", ".", "raw_value", "]", "else", ":", "objs", "=", "self", ".", "raw_value", "return", "[", "objs", "]" ]
Returns a list of values for this field for this instance. It's a list so we can accomodate many-to-many fields.
[ "Returns", "a", "list", "of", "values", "for", "this", "field", "for", "this", "instance", ".", "It", "s", "a", "list", "so", "we", "can", "accomodate", "many", "-", "to", "-", "many", "fields", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/datastructures.py#L194-L230
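The branching above is essentially a per-field-type display rule (choices mapping, date formatting, yes/no/unknown booleans, raw fallback). The real code defers to Django's formats and capfirst helpers; this framework-free sketch only shows the shape of the dispatch:

    import datetime

    EMPTY_VALUE = '(None)'

    def display_value(raw, choices=None, is_date=False, is_bool=False):
        if choices:                               # choice fields show their label
            return dict(choices).get(raw, EMPTY_VALUE)
        if is_date:
            return raw.strftime('%d %B %Y').lstrip('0') if raw else EMPTY_VALUE
        if is_bool:
            return {True: 'Yes', False: 'No', None: 'Unknown'}[raw]
        return raw

    print(display_value('a', choices=[('a', 'Apple'), ('b', 'Banana')]))   # Apple
    print(display_value(datetime.date(2024, 3, 1), is_date=True))          # 1 March 2024
    print(display_value(None, is_bool=True))                               # Unknown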
Alir3z4/django-databrowse
django_databrowse/datastructures.py
EasyInstanceField.urls
def urls(self): "Returns a list of (value, URL) tuples." # First, check the urls() method for each plugin. plugin_urls = [] for plugin_name, plugin in \ self.model.model_databrowse().plugins.items(): urls = plugin.urls(plugin_name, self) if urls is not None: #plugin_urls.append(urls) values = self.values() return zip(self.values(), urls) if self.field.rel: m = EasyModel(self.model.site, self.field.rel.to) if self.field.rel.to in self.model.model_list: lst = [] for value in self.values(): if value is None: continue url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.model_name, iri_to_uri(value._get_pk_val()))) lst.append((smart_text(value), url)) else: lst = [(value, None) for value in self.values()] elif self.field.choices: lst = [] for value in self.values(): url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.model_name, self.field.name, iri_to_uri(self.raw_value))) lst.append((value, url)) elif isinstance(self.field, models.URLField): val = self.values()[0] lst = [(val, iri_to_uri(val))] else: lst = [(self.values()[0], None)] return lst
python
def urls(self): "Returns a list of (value, URL) tuples." # First, check the urls() method for each plugin. plugin_urls = [] for plugin_name, plugin in \ self.model.model_databrowse().plugins.items(): urls = plugin.urls(plugin_name, self) if urls is not None: #plugin_urls.append(urls) values = self.values() return zip(self.values(), urls) if self.field.rel: m = EasyModel(self.model.site, self.field.rel.to) if self.field.rel.to in self.model.model_list: lst = [] for value in self.values(): if value is None: continue url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.model_name, iri_to_uri(value._get_pk_val()))) lst.append((smart_text(value), url)) else: lst = [(value, None) for value in self.values()] elif self.field.choices: lst = [] for value in self.values(): url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.model_name, self.field.name, iri_to_uri(self.raw_value))) lst.append((value, url)) elif isinstance(self.field, models.URLField): val = self.values()[0] lst = [(val, iri_to_uri(val))] else: lst = [(self.values()[0], None)] return lst
[ "def", "urls", "(", "self", ")", ":", "# First, check the urls() method for each plugin.", "plugin_urls", "=", "[", "]", "for", "plugin_name", ",", "plugin", "in", "self", ".", "model", ".", "model_databrowse", "(", ")", ".", "plugins", ".", "items", "(", ")", ":", "urls", "=", "plugin", ".", "urls", "(", "plugin_name", ",", "self", ")", "if", "urls", "is", "not", "None", ":", "#plugin_urls.append(urls)", "values", "=", "self", ".", "values", "(", ")", "return", "zip", "(", "self", ".", "values", "(", ")", ",", "urls", ")", "if", "self", ".", "field", ".", "rel", ":", "m", "=", "EasyModel", "(", "self", ".", "model", ".", "site", ",", "self", ".", "field", ".", "rel", ".", "to", ")", "if", "self", ".", "field", ".", "rel", ".", "to", "in", "self", ".", "model", ".", "model_list", ":", "lst", "=", "[", "]", "for", "value", "in", "self", ".", "values", "(", ")", ":", "if", "value", "is", "None", ":", "continue", "url", "=", "mark_safe", "(", "'%s%s/%s/objects/%s/'", "%", "(", "self", ".", "model", ".", "site", ".", "root_url", ",", "m", ".", "model", ".", "_meta", ".", "app_label", ",", "m", ".", "model", ".", "_meta", ".", "model_name", ",", "iri_to_uri", "(", "value", ".", "_get_pk_val", "(", ")", ")", ")", ")", "lst", ".", "append", "(", "(", "smart_text", "(", "value", ")", ",", "url", ")", ")", "else", ":", "lst", "=", "[", "(", "value", ",", "None", ")", "for", "value", "in", "self", ".", "values", "(", ")", "]", "elif", "self", ".", "field", ".", "choices", ":", "lst", "=", "[", "]", "for", "value", "in", "self", ".", "values", "(", ")", ":", "url", "=", "mark_safe", "(", "'%s%s/%s/fields/%s/%s/'", "%", "(", "self", ".", "model", ".", "site", ".", "root_url", ",", "self", ".", "model", ".", "model", ".", "_meta", ".", "app_label", ",", "self", ".", "model", ".", "model", ".", "_meta", ".", "model_name", ",", "self", ".", "field", ".", "name", ",", "iri_to_uri", "(", "self", ".", "raw_value", ")", ")", ")", "lst", ".", "append", "(", "(", "value", ",", "url", ")", ")", "elif", "isinstance", "(", "self", ".", "field", ",", "models", ".", "URLField", ")", ":", "val", "=", "self", ".", "values", "(", ")", "[", "0", "]", "lst", "=", "[", "(", "val", ",", "iri_to_uri", "(", "val", ")", ")", "]", "else", ":", "lst", "=", "[", "(", "self", ".", "values", "(", ")", "[", "0", "]", ",", "None", ")", "]", "return", "lst" ]
Returns a list of (value, URL) tuples.
[ "Returns", "a", "list", "of", "(", "value", "URL", ")", "tuples", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/datastructures.py#L232-L273
Alir3z4/django-databrowse
django_databrowse/plugins/calendars.py
CalendarPlugin.field_dict
def field_dict(self, model): """ Helper function that returns a dictionary of all DateFields or DateTimeFields in the given model. If self.field_names is set, it takes that into account when building the dictionary. """ if self.field_names is None: return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)]) else: return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and (f.name in self.field_names)])
python
def field_dict(self, model): """ Helper function that returns a dictionary of all DateFields or DateTimeFields in the given model. If self.field_names is set, it takes that into account when building the dictionary. """ if self.field_names is None: return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)]) else: return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and (f.name in self.field_names)])
[ "def", "field_dict", "(", "self", ",", "model", ")", ":", "if", "self", ".", "field_names", "is", "None", ":", "return", "dict", "(", "[", "(", "f", ".", "name", ",", "f", ")", "for", "f", "in", "model", ".", "_meta", ".", "fields", "if", "isinstance", "(", "f", ",", "models", ".", "DateField", ")", "]", ")", "else", ":", "return", "dict", "(", "[", "(", "f", ".", "name", ",", "f", ")", "for", "f", "in", "model", ".", "_meta", ".", "fields", "if", "isinstance", "(", "f", ",", "models", ".", "DateField", ")", "and", "(", "f", ".", "name", "in", "self", ".", "field_names", ")", "]", ")" ]
Helper function that returns a dictionary of all DateFields or DateTimeFields in the given model. If self.field_names is set, it takes that into account when building the dictionary.
[ "Helper", "function", "that", "returns", "a", "dictionary", "of", "all", "DateFields", "or", "DateTimeFields", "in", "the", "given", "model", ".", "If", "self", ".", "field_names", "is", "set", "it", "takes", "that", "into", "account", "when", "building", "the", "dictionary", "." ]
train
https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/plugins/calendars.py#L51-L64
BD2KGenomics/toil-scripts
src/toil_scripts/adam_kmers/count_kmers.py
kmer_dag
def kmer_dag(job, input_file, output_path, kmer_length, spark_conf, workers, cores, memory, sudo): ''' Optionally launches a Spark cluster and then runs ADAM to count k-mers on an input file. :param job: Toil job :param input_file: URL/path to input file to count k-mers on :param output_path: URL/path to save k-mer counts at :param kmer_length: The length of k-mer substrings to count. :param spark_conf: Optional Spark configuration. If set, workers should \ not be set. :param workers: Optional number of Spark workers to launch. If set, \ spark_conf should not be set, and cores and memory should be set. :param cores: Number of cores per Spark worker. Must be set if workers is \ set. :param memory: Amount of memory to provided to Spark workers. Must be set \ if workers is set. :param sudo: Whether or not to run Spark containers with sudo. :type job: toil.Job :type input_file: string :type output_path: string :type kmer_length: int or string :type spark_conf: string or None :type workers: int or None :type cores: int or None :type memory: int or None :type sudo: boolean ''' require((spark_conf is not None and workers is None) or (workers is not None and cores is not None and memory is not None and spark_conf is not None), "Either worker count (--workers) must be defined or user must pass in Spark configuration (--spark-conf).") # if we do not have a spark configuration, then we must spawn a cluster if spark_conf is None: master_hostname = spawn_spark_cluster(job, sudo, workers, cores) else: spark_conf = shlex.split(spark_conf) job.addChildJobFn(download_count_upload, masterHostname, input_file, output_file, kmer_length, spark_conf, memory, sudo)
python
def kmer_dag(job, input_file, output_path, kmer_length, spark_conf, workers, cores, memory, sudo): ''' Optionally launches a Spark cluster and then runs ADAM to count k-mers on an input file. :param job: Toil job :param input_file: URL/path to input file to count k-mers on :param output_path: URL/path to save k-mer counts at :param kmer_length: The length of k-mer substrings to count. :param spark_conf: Optional Spark configuration. If set, workers should \ not be set. :param workers: Optional number of Spark workers to launch. If set, \ spark_conf should not be set, and cores and memory should be set. :param cores: Number of cores per Spark worker. Must be set if workers is \ set. :param memory: Amount of memory to provided to Spark workers. Must be set \ if workers is set. :param sudo: Whether or not to run Spark containers with sudo. :type job: toil.Job :type input_file: string :type output_path: string :type kmer_length: int or string :type spark_conf: string or None :type workers: int or None :type cores: int or None :type memory: int or None :type sudo: boolean ''' require((spark_conf is not None and workers is None) or (workers is not None and cores is not None and memory is not None and spark_conf is not None), "Either worker count (--workers) must be defined or user must pass in Spark configuration (--spark-conf).") # if we do not have a spark configuration, then we must spawn a cluster if spark_conf is None: master_hostname = spawn_spark_cluster(job, sudo, workers, cores) else: spark_conf = shlex.split(spark_conf) job.addChildJobFn(download_count_upload, masterHostname, input_file, output_file, kmer_length, spark_conf, memory, sudo)
[ "def", "kmer_dag", "(", "job", ",", "input_file", ",", "output_path", ",", "kmer_length", ",", "spark_conf", ",", "workers", ",", "cores", ",", "memory", ",", "sudo", ")", ":", "require", "(", "(", "spark_conf", "is", "not", "None", "and", "workers", "is", "None", ")", "or", "(", "workers", "is", "not", "None", "and", "cores", "is", "not", "None", "and", "memory", "is", "not", "None", "and", "spark_conf", "is", "not", "None", ")", ",", "\"Either worker count (--workers) must be defined or user must pass in Spark configuration (--spark-conf).\"", ")", "# if we do not have a spark configuration, then we must spawn a cluster", "if", "spark_conf", "is", "None", ":", "master_hostname", "=", "spawn_spark_cluster", "(", "job", ",", "sudo", ",", "workers", ",", "cores", ")", "else", ":", "spark_conf", "=", "shlex", ".", "split", "(", "spark_conf", ")", "job", ".", "addChildJobFn", "(", "download_count_upload", ",", "masterHostname", ",", "input_file", ",", "output_file", ",", "kmer_length", ",", "spark_conf", ",", "memory", ",", "sudo", ")" ]
Optionally launches a Spark cluster and then runs ADAM to count k-mers on an input file. :param job: Toil job :param input_file: URL/path to input file to count k-mers on :param output_path: URL/path to save k-mer counts at :param kmer_length: The length of k-mer substrings to count. :param spark_conf: Optional Spark configuration. If set, workers should \ not be set. :param workers: Optional number of Spark workers to launch. If set, \ spark_conf should not be set, and cores and memory should be set. :param cores: Number of cores per Spark worker. Must be set if workers is \ set. :param memory: Amount of memory to provided to Spark workers. Must be set \ if workers is set. :param sudo: Whether or not to run Spark containers with sudo. :type job: toil.Job :type input_file: string :type output_path: string :type kmer_length: int or string :type spark_conf: string or None :type workers: int or None :type cores: int or None :type memory: int or None :type sudo: boolean
[ "Optionally", "launches", "a", "Spark", "cluster", "and", "then", "runs", "ADAM", "to", "count", "k", "-", "mers", "on", "an", "input", "file", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_kmers/count_kmers.py#L20-L74
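Per the docstring, --spark-conf and --workers are meant to be mutually exclusive, with --cores and --memory required alongside --workers; a plain-Python sketch of that precondition (the function and its names are mine, and it follows the documented intent rather than the require() expression exactly as written):

    def validate_spark_options(spark_conf, workers, cores, memory):
        """Either reuse an existing Spark config, or describe a cluster to spawn."""
        use_existing = spark_conf is not None and workers is None
        spawn_cluster = (workers is not None and cores is not None
                         and memory is not None and spark_conf is None)
        if not (use_existing or spawn_cluster):
            raise ValueError('Pass either --spark-conf, or --workers together with '
                             '--cores and --memory, but not both.')
        return spawn_cluster            # True when a cluster must be launched

    print(validate_spark_options('spark.driver.memory=4g', None, None, None))  # False
    print(validate_spark_options(None, 4, 8, '20G'))                           # True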
BD2KGenomics/toil-scripts
src/toil_scripts/adam_kmers/count_kmers.py
download_count_upload
def download_count_upload(job, master_ip, input_file, output_file, kmer_length, spark_conf, memory, sudo): ''' Runs k-mer counting. 1. If the input file is located in S3, the file is copied into HDFS. 2. If the input file is not in Parquet format, the file is converted into Parquet. 3. The k-mers are counted and saved as text. 4. If the output path is an S3 URL, the file is copied back to S3. :param job: Toil job :param input_file: URL/path to input file to count k-mers on :param output_file: URL/path to save k-mer counts at :param kmer_length: The length of k-mer substrings to count. :param spark_conf: Optional Spark configuration. If set, memory should \ not be set. :param memory: Amount of memory to provided to Spark workers. Must be set \ if spark_conf is not set. :param sudo: Whether or not to run Spark containers with sudo. :type job: toil.Job :type input_file: string :type output_file: string :type kmer_length: int or string :type spark_conf: list of string or None :type memory: int or None :type sudo: boolean ''' if master_ip is not None: hdfs_dir = "hdfs://{0}:{1}/".format(master_ip, HDFS_MASTER_PORT) else: _log.warn('Master IP is not set. If default filesystem is not set, jobs may fail.') hdfs_dir = "" # if the file isn't already in hdfs, copy it in hdfs_input_file = hdfs_dir if input_file.startswith("s3://"): # append the s3 file name to our hdfs path hdfs_input_file += input_file.split("/")[-1] # run the download _log.info("Downloading input file %s to %s.", input_file, hdfs_input_file) call_conductor(job, master_ip, input_file, hdfs_input_file, memory=memory, override_parameters=spark_conf) else: if not input_file.startswith("hdfs://"): _log.warn("If not in S3, input file (%s) expected to be in HDFS (%s).", input_file, hdfs_dir) # where are we writing the output to? is it going to a location in hdfs or not? run_upload = True hdfs_output_file = hdfs_dir + "kmer_output.txt" if output_file.startswith(hdfs_dir): run_upload = False hdfs_output_file = output_file # do we need to convert to adam? if (hdfs_input_file.endswith('.bam') or hdfs_input_file.endswith('.sam') or hdfs_input_file.endswith('.fq') or hdfs_input_file.endswith('.fastq')): hdfs_tmp_file = hdfs_input_file # change the file extension to adam hdfs_input_file = '.'.join(hdfs_input_file.split('.')[:-1].append('adam')) # convert the file _log.info('Converting %s into ADAM format at %s.', hdfs_tmp_file, hdfs_input_file) call_adam(job, master_ip, ['transform', hdfs_tmp_file, hdfs_input_file], memory=memory, override_parameters=spark_conf) # run k-mer counting _log.info('Counting %d-mers in %s, and saving to %s.', kmer_length, hdfs_input_file, hdfs_output_file) call_adam(job, master_ip, ['count_kmers', hdfs_input_file, hdfs_output_file, str(kmer_length)], memory=memory, override_parameters=spark_conf) # do we need to upload the file back? if so, run upload if run_upload: _log.info("Uploading output file %s to %s.", hdfs_output_file, output_file) call_conductor(job, master_ip, hdfs_output_file, output_file, memory=memory, override_parameters=spark_conf)
python
def download_count_upload(job, master_ip, input_file, output_file, kmer_length, spark_conf, memory, sudo): ''' Runs k-mer counting. 1. If the input file is located in S3, the file is copied into HDFS. 2. If the input file is not in Parquet format, the file is converted into Parquet. 3. The k-mers are counted and saved as text. 4. If the output path is an S3 URL, the file is copied back to S3. :param job: Toil job :param input_file: URL/path to input file to count k-mers on :param output_file: URL/path to save k-mer counts at :param kmer_length: The length of k-mer substrings to count. :param spark_conf: Optional Spark configuration. If set, memory should \ not be set. :param memory: Amount of memory to provided to Spark workers. Must be set \ if spark_conf is not set. :param sudo: Whether or not to run Spark containers with sudo. :type job: toil.Job :type input_file: string :type output_file: string :type kmer_length: int or string :type spark_conf: list of string or None :type memory: int or None :type sudo: boolean ''' if master_ip is not None: hdfs_dir = "hdfs://{0}:{1}/".format(master_ip, HDFS_MASTER_PORT) else: _log.warn('Master IP is not set. If default filesystem is not set, jobs may fail.') hdfs_dir = "" # if the file isn't already in hdfs, copy it in hdfs_input_file = hdfs_dir if input_file.startswith("s3://"): # append the s3 file name to our hdfs path hdfs_input_file += input_file.split("/")[-1] # run the download _log.info("Downloading input file %s to %s.", input_file, hdfs_input_file) call_conductor(job, master_ip, input_file, hdfs_input_file, memory=memory, override_parameters=spark_conf) else: if not input_file.startswith("hdfs://"): _log.warn("If not in S3, input file (%s) expected to be in HDFS (%s).", input_file, hdfs_dir) # where are we writing the output to? is it going to a location in hdfs or not? run_upload = True hdfs_output_file = hdfs_dir + "kmer_output.txt" if output_file.startswith(hdfs_dir): run_upload = False hdfs_output_file = output_file # do we need to convert to adam? if (hdfs_input_file.endswith('.bam') or hdfs_input_file.endswith('.sam') or hdfs_input_file.endswith('.fq') or hdfs_input_file.endswith('.fastq')): hdfs_tmp_file = hdfs_input_file # change the file extension to adam hdfs_input_file = '.'.join(hdfs_input_file.split('.')[:-1].append('adam')) # convert the file _log.info('Converting %s into ADAM format at %s.', hdfs_tmp_file, hdfs_input_file) call_adam(job, master_ip, ['transform', hdfs_tmp_file, hdfs_input_file], memory=memory, override_parameters=spark_conf) # run k-mer counting _log.info('Counting %d-mers in %s, and saving to %s.', kmer_length, hdfs_input_file, hdfs_output_file) call_adam(job, master_ip, ['count_kmers', hdfs_input_file, hdfs_output_file, str(kmer_length)], memory=memory, override_parameters=spark_conf) # do we need to upload the file back? if so, run upload if run_upload: _log.info("Uploading output file %s to %s.", hdfs_output_file, output_file) call_conductor(job, master_ip, hdfs_output_file, output_file, memory=memory, override_parameters=spark_conf)
[ "def", "download_count_upload", "(", "job", ",", "master_ip", ",", "input_file", ",", "output_file", ",", "kmer_length", ",", "spark_conf", ",", "memory", ",", "sudo", ")", ":", "if", "master_ip", "is", "not", "None", ":", "hdfs_dir", "=", "\"hdfs://{0}:{1}/\"", ".", "format", "(", "master_ip", ",", "HDFS_MASTER_PORT", ")", "else", ":", "_log", ".", "warn", "(", "'Master IP is not set. If default filesystem is not set, jobs may fail.'", ")", "hdfs_dir", "=", "\"\"", "# if the file isn't already in hdfs, copy it in", "hdfs_input_file", "=", "hdfs_dir", "if", "input_file", ".", "startswith", "(", "\"s3://\"", ")", ":", "# append the s3 file name to our hdfs path", "hdfs_input_file", "+=", "input_file", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "# run the download", "_log", ".", "info", "(", "\"Downloading input file %s to %s.\"", ",", "input_file", ",", "hdfs_input_file", ")", "call_conductor", "(", "job", ",", "master_ip", ",", "input_file", ",", "hdfs_input_file", ",", "memory", "=", "memory", ",", "override_parameters", "=", "spark_conf", ")", "else", ":", "if", "not", "input_file", ".", "startswith", "(", "\"hdfs://\"", ")", ":", "_log", ".", "warn", "(", "\"If not in S3, input file (%s) expected to be in HDFS (%s).\"", ",", "input_file", ",", "hdfs_dir", ")", "# where are we writing the output to? is it going to a location in hdfs or not?", "run_upload", "=", "True", "hdfs_output_file", "=", "hdfs_dir", "+", "\"kmer_output.txt\"", "if", "output_file", ".", "startswith", "(", "hdfs_dir", ")", ":", "run_upload", "=", "False", "hdfs_output_file", "=", "output_file", "# do we need to convert to adam?", "if", "(", "hdfs_input_file", ".", "endswith", "(", "'.bam'", ")", "or", "hdfs_input_file", ".", "endswith", "(", "'.sam'", ")", "or", "hdfs_input_file", ".", "endswith", "(", "'.fq'", ")", "or", "hdfs_input_file", ".", "endswith", "(", "'.fastq'", ")", ")", ":", "hdfs_tmp_file", "=", "hdfs_input_file", "# change the file extension to adam", "hdfs_input_file", "=", "'.'", ".", "join", "(", "hdfs_input_file", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ".", "append", "(", "'adam'", ")", ")", "# convert the file", "_log", ".", "info", "(", "'Converting %s into ADAM format at %s.'", ",", "hdfs_tmp_file", ",", "hdfs_input_file", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "'transform'", ",", "hdfs_tmp_file", ",", "hdfs_input_file", "]", ",", "memory", "=", "memory", ",", "override_parameters", "=", "spark_conf", ")", "# run k-mer counting", "_log", ".", "info", "(", "'Counting %d-mers in %s, and saving to %s.'", ",", "kmer_length", ",", "hdfs_input_file", ",", "hdfs_output_file", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "'count_kmers'", ",", "hdfs_input_file", ",", "hdfs_output_file", ",", "str", "(", "kmer_length", ")", "]", ",", "memory", "=", "memory", ",", "override_parameters", "=", "spark_conf", ")", "# do we need to upload the file back? if so, run upload", "if", "run_upload", ":", "_log", ".", "info", "(", "\"Uploading output file %s to %s.\"", ",", "hdfs_output_file", ",", "output_file", ")", "call_conductor", "(", "job", ",", "master_ip", ",", "hdfs_output_file", ",", "output_file", ",", "memory", "=", "memory", ",", "override_parameters", "=", "spark_conf", ")" ]
Runs k-mer counting. 1. If the input file is located in S3, the file is copied into HDFS. 2. If the input file is not in Parquet format, the file is converted into Parquet. 3. The k-mers are counted and saved as text. 4. If the output path is an S3 URL, the file is copied back to S3. :param job: Toil job :param input_file: URL/path to input file to count k-mers on :param output_file: URL/path to save k-mer counts at :param kmer_length: The length of k-mer substrings to count. :param spark_conf: Optional Spark configuration. If set, memory should \ not be set. :param memory: Amount of memory to provide to Spark workers. Must be set \ if spark_conf is not set. :param sudo: Whether or not to run Spark containers with sudo. :type job: toil.Job :type input_file: string :type output_file: string :type kmer_length: int or string :type spark_conf: list of string or None :type memory: int or None :type sudo: boolean
[ "Runs", "k", "-", "mer", "counting", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_kmers/count_kmers.py#L76-L172
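A minimal sketch of how download_count_upload could be wired into a Toil workflow when the Spark/HDFS master address is already known. The master address, S3 URLs, and resource values are placeholders, and the import path is inferred from the file path recorded above (src/toil_scripts/adam_kmers/count_kmers.py); in the pipeline itself the function is normally reached through kmer_dag/main rather than called directly.
import argparse
from toil.job import Job
# assumes the toil_scripts package is installed; module path inferred from the record above
from toil_scripts.adam_kmers.count_kmers import download_count_upload

parser = argparse.ArgumentParser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()

root = Job.wrapJobFn(download_count_upload,
                     '10.0.0.1',                        # master_ip (placeholder)
                     's3://example-bucket/sample.bam',  # input_file (placeholder)
                     's3://example-bucket/kmers.txt',   # output_file (placeholder)
                     20,                                # kmer_length
                     None,                              # spark_conf
                     20,                                # memory (int, per the docstring)
                     False)                             # sudo
Job.Runner.startToil(root, args)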
BD2KGenomics/toil-scripts
src/toil_scripts/adam_kmers/count_kmers.py
main
def main(): ''' Sets up command line parser for Toil/ADAM based k-mer counter, and launches k-mer counter with optional Spark cluster. ''' parser = argparse.ArgumentParser() # add parser arguments parser.add_argument('--input_path', help='The full path to the input SAM/BAM/ADAM/FASTQ file.') parser.add_argument('--output-path', help='full path where final results will be output.') parser.add_argument('--kmer-length', help='Length to use for k-mer counting. Defaults to 20.', default=20, type=int) parser.add_argument('--spark-conf', help='Optional configuration to pass to Spark commands. Either this or --workers must be specified.', default=None) parser.add_argument('--memory', help='Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.', default=None, type=int) parser.add_argument('--cores', help='Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.', default=None, type=int) parser.add_argument('--workers', help='Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.', default=None, type=int) parser.add_argument('--sudo', help='Run docker containers with sudo. Defaults to False.', default=False, action='store_true') Job.Runner.addToilOptions(parser) args = parser.parse_args() Job.Runner.startToil(Job.wrapJobFn(kmer_dag, args.kmer_length, args.input_path, args.output_path, args.spark_conf, args.workers, args.cores, args.memory, args.sudo, checkpoint=True), args)
python
def main(): ''' Sets up command line parser for Toil/ADAM based k-mer counter, and launches k-mer counter with optional Spark cluster. ''' parser = argparse.ArgumentParser() # add parser arguments parser.add_argument('--input_path', help='The full path to the input SAM/BAM/ADAM/FASTQ file.') parser.add_argument('--output-path', help='full path where final results will be output.') parser.add_argument('--kmer-length', help='Length to use for k-mer counting. Defaults to 20.', default=20, type=int) parser.add_argument('--spark-conf', help='Optional configuration to pass to Spark commands. Either this or --workers must be specified.', default=None) parser.add_argument('--memory', help='Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.', default=None, type=int) parser.add_argument('--cores', help='Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.', default=None, type=int) parser.add_argument('--workers', help='Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.', default=None, type=int) parser.add_argument('--sudo', help='Run docker containers with sudo. Defaults to False.', default=False, action='store_true') Job.Runner.addToilOptions(parser) args = parser.parse_args() Job.Runner.startToil(Job.wrapJobFn(kmer_dag, args.kmer_length, args.input_path, args.output_path, args.spark_conf, args.workers, args.cores, args.memory, args.sudo, checkpoint=True), args)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# add parser arguments", "parser", ".", "add_argument", "(", "'--input_path'", ",", "help", "=", "'The full path to the input SAM/BAM/ADAM/FASTQ file.'", ")", "parser", ".", "add_argument", "(", "'--output-path'", ",", "help", "=", "'full path where final results will be output.'", ")", "parser", ".", "add_argument", "(", "'--kmer-length'", ",", "help", "=", "'Length to use for k-mer counting. Defaults to 20.'", ",", "default", "=", "20", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--spark-conf'", ",", "help", "=", "'Optional configuration to pass to Spark commands. Either this or --workers must be specified.'", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'--memory'", ",", "help", "=", "'Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.'", ",", "default", "=", "None", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--cores'", ",", "help", "=", "'Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.'", ",", "default", "=", "None", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--workers'", ",", "help", "=", "'Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.'", ",", "default", "=", "None", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--sudo'", ",", "help", "=", "'Run docker containers with sudo. Defaults to False.'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "kmer_dag", ",", "args", ".", "kmer_length", ",", "args", ".", "input_path", ",", "args", ".", "output_path", ",", "args", ".", "spark_conf", ",", "args", ".", "workers", ",", "args", ".", "cores", ",", "args", ".", "memory", ",", "args", ".", "sudo", ",", "checkpoint", "=", "True", ")", ",", "args", ")" ]
Sets up command line parser for Toil/ADAM based k-mer counter, and launches k-mer counter with optional Spark cluster.
[ "Sets", "up", "command", "line", "parser", "for", "Toil", "/", "ADAM", "based", "k", "-", "mer", "counter", "and", "launches", "k", "-", "mer", "counter", "with", "optional", "Spark", "cluster", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_kmers/count_kmers.py#L175-L223
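For orientation, a hypothetical invocation of this entry point, assuming the module is runnable as a script; the bucket, resource numbers, and job store locator are placeholders. Note that --input_path keeps an underscore while the remaining flags use hyphens, exactly as defined in the argparse setup above.
python src/toil_scripts/adam_kmers/count_kmers.py \
    --input_path s3://example-bucket/sample.bam \
    --output-path s3://example-bucket/sample.kmers.txt \
    --kmer-length 20 \
    --workers 4 --cores 16 --memory 100 \
    aws:us-west-2:example-jobstore
The trailing positional argument is whatever job store locator the installed Toil version expects from Job.Runner.addToilOptions; --spark-conf can be passed instead of --workers/--cores/--memory when an external Spark cluster is used.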
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
run_gatk_germline_pipeline
def run_gatk_germline_pipeline(job, samples, config): """ Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.preprocess_only If True, then stops pipeline after preprocessing steps config.joint_genotype If True, then joint genotypes cohort config.run_oncotator If True, then adds Oncotator to pipeline Additional parameters are needed for downstream steps. Refer to pipeline README for more information. """ # Determine the available disk space on a worker node before any jobs have been run. work_dir = job.fileStore.getLocalTempDir() st = os.statvfs(work_dir) config.available_disk = st.f_bavail * st.f_frsize # Check that there is a reasonable number of samples for joint genotyping num_samples = len(samples) if config.joint_genotype and not 30 < num_samples < 200: job.fileStore.logToMaster('WARNING: GATK recommends batches of ' '30 to 200 samples for joint genotyping. ' 'The current cohort has %d samples.' % num_samples) shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate() job.addChild(shared_files) if config.preprocess_only: for sample in samples: shared_files.addChildJobFn(prepare_bam, sample.uuid, sample.url, shared_files.rv(), paired_url=sample.paired_url, rg_line=sample.rg_line) else: run_pipeline = Job.wrapJobFn(gatk_germline_pipeline, samples, shared_files.rv()).encapsulate() shared_files.addChild(run_pipeline) if config.run_oncotator: annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv()) run_pipeline.addChild(annotate)
python
def run_gatk_germline_pipeline(job, samples, config): """ Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.preprocess_only If True, then stops pipeline after preprocessing steps config.joint_genotype If True, then joint genotypes cohort config.run_oncotator If True, then adds Oncotator to pipeline Additional parameters are needed for downstream steps. Refer to pipeline README for more information. """ # Determine the available disk space on a worker node before any jobs have been run. work_dir = job.fileStore.getLocalTempDir() st = os.statvfs(work_dir) config.available_disk = st.f_bavail * st.f_frsize # Check that there is a reasonable number of samples for joint genotyping num_samples = len(samples) if config.joint_genotype and not 30 < num_samples < 200: job.fileStore.logToMaster('WARNING: GATK recommends batches of ' '30 to 200 samples for joint genotyping. ' 'The current cohort has %d samples.' % num_samples) shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate() job.addChild(shared_files) if config.preprocess_only: for sample in samples: shared_files.addChildJobFn(prepare_bam, sample.uuid, sample.url, shared_files.rv(), paired_url=sample.paired_url, rg_line=sample.rg_line) else: run_pipeline = Job.wrapJobFn(gatk_germline_pipeline, samples, shared_files.rv()).encapsulate() shared_files.addChild(run_pipeline) if config.run_oncotator: annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv()) run_pipeline.addChild(annotate)
[ "def", "run_gatk_germline_pipeline", "(", "job", ",", "samples", ",", "config", ")", ":", "# Determine the available disk space on a worker node before any jobs have been run.", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "st", "=", "os", ".", "statvfs", "(", "work_dir", ")", "config", ".", "available_disk", "=", "st", ".", "f_bavail", "*", "st", ".", "f_frsize", "# Check that there is a reasonable number of samples for joint genotyping", "num_samples", "=", "len", "(", "samples", ")", "if", "config", ".", "joint_genotype", "and", "not", "30", "<", "num_samples", "<", "200", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'WARNING: GATK recommends batches of '", "'30 to 200 samples for joint genotyping. '", "'The current cohort has %d samples.'", "%", "num_samples", ")", "shared_files", "=", "Job", ".", "wrapJobFn", "(", "download_shared_files", ",", "config", ")", ".", "encapsulate", "(", ")", "job", ".", "addChild", "(", "shared_files", ")", "if", "config", ".", "preprocess_only", ":", "for", "sample", "in", "samples", ":", "shared_files", ".", "addChildJobFn", "(", "prepare_bam", ",", "sample", ".", "uuid", ",", "sample", ".", "url", ",", "shared_files", ".", "rv", "(", ")", ",", "paired_url", "=", "sample", ".", "paired_url", ",", "rg_line", "=", "sample", ".", "rg_line", ")", "else", ":", "run_pipeline", "=", "Job", ".", "wrapJobFn", "(", "gatk_germline_pipeline", ",", "samples", ",", "shared_files", ".", "rv", "(", ")", ")", ".", "encapsulate", "(", ")", "shared_files", ".", "addChild", "(", "run_pipeline", ")", "if", "config", ".", "run_oncotator", ":", "annotate", "=", "Job", ".", "wrapJobFn", "(", "annotate_vcfs", ",", "run_pipeline", ".", "rv", "(", ")", ",", "shared_files", ".", "rv", "(", ")", ")", "run_pipeline", ".", "addChild", "(", "annotate", ")" ]
Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.preprocess_only If True, then stops pipeline after preprocessing steps config.joint_genotype If True, then joint genotypes cohort config.run_oncotator If True, then adds Oncotator to pipeline Additional parameters are needed for downstream steps. Refer to pipeline README for more information.
[ "Downloads", "shared", "files", "and", "calls", "the", "GATK", "best", "practices", "germline", "pipeline", "for", "a", "cohort", "of", "samples" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L88-L132
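A minimal sketch of handing samples and configuration to run_gatk_germline_pipeline. The GermlineSample field order is inferred from parse_manifest further down; the Namespace shows only the three flags named in this docstring and is deliberately incomplete, since a real run also needs the reference, resource, and output attributes listed in the downstream docstrings (genome_fasta, output_dir, cores, xmx, ssec, and so on).
from argparse import Namespace
from collections import namedtuple
from toil.job import Job
from toil_scripts.gatk_germline.germline import run_gatk_germline_pipeline  # path from the record above

# field order inferred from how parse_manifest builds its results
GermlineSample = namedtuple('GermlineSample', 'uuid url paired_url rg_line')

samples = [GermlineSample(uuid='patient-1',
                          url='s3://example-bucket/patient-1.bam',  # placeholder URL
                          paired_url=None,
                          rg_line=None)]
config = Namespace(preprocess_only=False,
                   joint_genotype=False,
                   run_oncotator=False)  # placeholder; extend with the downstream attributes before a real run

root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config)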
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
gatk_germline_pipeline
def gatk_germline_pipeline(job, samples, config): """ Runs the GATK best practices pipeline for germline SNP and INDEL discovery. Steps in Pipeline 0: Generate and preprocess BAM - Uploads processed BAM to output directory 1: Call Variants using HaplotypeCaller - Uploads GVCF 2: Genotype VCF - Uploads VCF 3: Filter Variants using either "hard filters" or VQSR - Uploads filtered VCF :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.joint_genotype If True, then joint genotype and filter cohort config.hc_output URL or local path to HaplotypeCaller output for testing :return: Dictionary of filtered VCF FileStoreIDs :rtype: dict """ require(len(samples) > 0, 'No samples were provided!') # Get total size of genome reference files. This is used for configuring disk size. genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # 0: Generate processed BAM and BAI files for each sample # group preprocessing and variant calling steps in empty Job instance group_bam_jobs = Job() gvcfs = {} for sample in samples: # 0: Generate processed BAM and BAI files for each sample get_bam = group_bam_jobs.addChildJobFn(prepare_bam, sample.uuid, sample.url, config, paired_url=sample.paired_url, rg_line=sample.rg_line) # 1: Generate per sample gvcfs {uuid: gvcf_id} # The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference # files, and the output GVCF file. The output GVCF is smaller than the input BAM file. hc_disk = PromisedRequirement(lambda bam, bai, ref_size: 2 * bam.size + bai.size + ref_size, get_bam.rv(0), get_bam.rv(1), genome_ref_size) get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller, get_bam.rv(0), get_bam.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, annotations=config.annotations, cores=config.cores, disk=hc_disk, memory=config.xmx, hc_output=config.hc_output) # Store cohort GVCFs in dictionary gvcfs[sample.uuid] = get_gvcf.rv() # Upload individual sample GVCF before genotyping to a sample specific output directory vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix) get_gvcf.addChildJobFn(output_file_job, vqsr_name, get_gvcf.rv(), os.path.join(config.output_dir, sample.uuid), s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, get_gvcf.rv())) # VQSR requires many variants in order to train a decent model. GATK recommends a minimum of # 30 exomes or one large WGS sample: # https://software.broadinstitute.org/gatk/documentation/article?id=3225 filtered_vcfs = {} if config.joint_genotype: # Need to configure joint genotype in a separate function to resolve promises filtered_vcfs = group_bam_jobs.addFollowOnJobFn(joint_genotype_and_filter, gvcfs, config).rv() # If not joint genotyping, then iterate over cohort and genotype and filter individually. 
else: for uuid, gvcf_id in gvcfs.iteritems(): filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn(genotype_and_filter, {uuid: gvcf_id}, config).rv() job.addChild(group_bam_jobs) return filtered_vcfs
python
def gatk_germline_pipeline(job, samples, config): """ Runs the GATK best practices pipeline for germline SNP and INDEL discovery. Steps in Pipeline 0: Generate and preprocess BAM - Uploads processed BAM to output directory 1: Call Variants using HaplotypeCaller - Uploads GVCF 2: Genotype VCF - Uploads VCF 3: Filter Variants using either "hard filters" or VQSR - Uploads filtered VCF :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.joint_genotype If True, then joint genotype and filter cohort config.hc_output URL or local path to HaplotypeCaller output for testing :return: Dictionary of filtered VCF FileStoreIDs :rtype: dict """ require(len(samples) > 0, 'No samples were provided!') # Get total size of genome reference files. This is used for configuring disk size. genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # 0: Generate processed BAM and BAI files for each sample # group preprocessing and variant calling steps in empty Job instance group_bam_jobs = Job() gvcfs = {} for sample in samples: # 0: Generate processed BAM and BAI files for each sample get_bam = group_bam_jobs.addChildJobFn(prepare_bam, sample.uuid, sample.url, config, paired_url=sample.paired_url, rg_line=sample.rg_line) # 1: Generate per sample gvcfs {uuid: gvcf_id} # The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference # files, and the output GVCF file. The output GVCF is smaller than the input BAM file. hc_disk = PromisedRequirement(lambda bam, bai, ref_size: 2 * bam.size + bai.size + ref_size, get_bam.rv(0), get_bam.rv(1), genome_ref_size) get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller, get_bam.rv(0), get_bam.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, annotations=config.annotations, cores=config.cores, disk=hc_disk, memory=config.xmx, hc_output=config.hc_output) # Store cohort GVCFs in dictionary gvcfs[sample.uuid] = get_gvcf.rv() # Upload individual sample GVCF before genotyping to a sample specific output directory vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix) get_gvcf.addChildJobFn(output_file_job, vqsr_name, get_gvcf.rv(), os.path.join(config.output_dir, sample.uuid), s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, get_gvcf.rv())) # VQSR requires many variants in order to train a decent model. GATK recommends a minimum of # 30 exomes or one large WGS sample: # https://software.broadinstitute.org/gatk/documentation/article?id=3225 filtered_vcfs = {} if config.joint_genotype: # Need to configure joint genotype in a separate function to resolve promises filtered_vcfs = group_bam_jobs.addFollowOnJobFn(joint_genotype_and_filter, gvcfs, config).rv() # If not joint genotyping, then iterate over cohort and genotype and filter individually. 
else: for uuid, gvcf_id in gvcfs.iteritems(): filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn(genotype_and_filter, {uuid: gvcf_id}, config).rv() job.addChild(group_bam_jobs) return filtered_vcfs
[ "def", "gatk_germline_pipeline", "(", "job", ",", "samples", ",", "config", ")", ":", "require", "(", "len", "(", "samples", ")", ">", "0", ",", "'No samples were provided!'", ")", "# Get total size of genome reference files. This is used for configuring disk size.", "genome_ref_size", "=", "config", ".", "genome_fasta", ".", "size", "+", "config", ".", "genome_fai", ".", "size", "+", "config", ".", "genome_dict", ".", "size", "# 0: Generate processed BAM and BAI files for each sample", "# group preprocessing and variant calling steps in empty Job instance", "group_bam_jobs", "=", "Job", "(", ")", "gvcfs", "=", "{", "}", "for", "sample", "in", "samples", ":", "# 0: Generate processed BAM and BAI files for each sample", "get_bam", "=", "group_bam_jobs", ".", "addChildJobFn", "(", "prepare_bam", ",", "sample", ".", "uuid", ",", "sample", ".", "url", ",", "config", ",", "paired_url", "=", "sample", ".", "paired_url", ",", "rg_line", "=", "sample", ".", "rg_line", ")", "# 1: Generate per sample gvcfs {uuid: gvcf_id}", "# The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference", "# files, and the output GVCF file. The output GVCF is smaller than the input BAM file.", "hc_disk", "=", "PromisedRequirement", "(", "lambda", "bam", ",", "bai", ",", "ref_size", ":", "2", "*", "bam", ".", "size", "+", "bai", ".", "size", "+", "ref_size", ",", "get_bam", ".", "rv", "(", "0", ")", ",", "get_bam", ".", "rv", "(", "1", ")", ",", "genome_ref_size", ")", "get_gvcf", "=", "get_bam", ".", "addFollowOnJobFn", "(", "gatk_haplotype_caller", ",", "get_bam", ".", "rv", "(", "0", ")", ",", "get_bam", ".", "rv", "(", "1", ")", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "annotations", "=", "config", ".", "annotations", ",", "cores", "=", "config", ".", "cores", ",", "disk", "=", "hc_disk", ",", "memory", "=", "config", ".", "xmx", ",", "hc_output", "=", "config", ".", "hc_output", ")", "# Store cohort GVCFs in dictionary", "gvcfs", "[", "sample", ".", "uuid", "]", "=", "get_gvcf", ".", "rv", "(", ")", "# Upload individual sample GVCF before genotyping to a sample specific output directory", "vqsr_name", "=", "'{}{}.g.vcf'", ".", "format", "(", "sample", ".", "uuid", ",", "config", ".", "suffix", ")", "get_gvcf", ".", "addChildJobFn", "(", "output_file_job", ",", "vqsr_name", ",", "get_gvcf", ".", "rv", "(", ")", ",", "os", ".", "path", ".", "join", "(", "config", ".", "output_dir", ",", "sample", ".", "uuid", ")", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "PromisedRequirement", "(", "lambda", "x", ":", "x", ".", "size", ",", "get_gvcf", ".", "rv", "(", ")", ")", ")", "# VQSR requires many variants in order to train a decent model. 
GATK recommends a minimum of", "# 30 exomes or one large WGS sample:", "# https://software.broadinstitute.org/gatk/documentation/article?id=3225", "filtered_vcfs", "=", "{", "}", "if", "config", ".", "joint_genotype", ":", "# Need to configure joint genotype in a separate function to resolve promises", "filtered_vcfs", "=", "group_bam_jobs", ".", "addFollowOnJobFn", "(", "joint_genotype_and_filter", ",", "gvcfs", ",", "config", ")", ".", "rv", "(", ")", "# If not joint genotyping, then iterate over cohort and genotype and filter individually.", "else", ":", "for", "uuid", ",", "gvcf_id", "in", "gvcfs", ".", "iteritems", "(", ")", ":", "filtered_vcfs", "[", "uuid", "]", "=", "group_bam_jobs", ".", "addFollowOnJobFn", "(", "genotype_and_filter", ",", "{", "uuid", ":", "gvcf_id", "}", ",", "config", ")", ".", "rv", "(", ")", "job", ".", "addChild", "(", "group_bam_jobs", ")", "return", "filtered_vcfs" ]
Runs the GATK best practices pipeline for germline SNP and INDEL discovery. Steps in Pipeline 0: Generate and preprocess BAM - Uploads processed BAM to output directory 1: Call Variants using HaplotypeCaller - Uploads GVCF 2: Genotype VCF - Uploads VCF 3: Filter Variants using either "hard filters" or VQSR - Uploads filtered VCF :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.joint_genotype If True, then joint genotype and filter cohort config.hc_output URL or local path to HaplotypeCaller output for testing :return: Dictionary of filtered VCF FileStoreIDs :rtype: dict
[ "Runs", "the", "GATK", "best", "practices", "pipeline", "for", "germline", "SNP", "and", "INDEL", "discovery", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L135-L233
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
joint_genotype_and_filter
def joint_genotype_and_filter(job, gvcfs, config): """ Checks for enough disk space for joint genotyping, then calls the genotype and filter pipeline function. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.available_disk Total available disk space :returns: FileStoreID for the joint genotyped and filtered VCF file :rtype: str """ # Get the total size of genome reference files genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # Require at least 2.5x the sum of the individual GVCF files cohort_size = sum(gvcf.size for gvcf in gvcfs.values()) require(int(2.5 * cohort_size + genome_ref_size) < config.available_disk, 'There is not enough disk space to joint ' 'genotype samples:\n{}'.format('\n'.join(gvcfs.keys()))) job.fileStore.logToMaster('Merging cohort into a single GVCF file') return job.addChildJobFn(genotype_and_filter, gvcfs, config).rv()
python
def joint_genotype_and_filter(job, gvcfs, config): """ Checks for enough disk space for joint genotyping, then calls the genotype and filter pipeline function. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.available_disk Total available disk space :returns: FileStoreID for the joint genotyped and filtered VCF file :rtype: str """ # Get the total size of genome reference files genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # Require at least 2.5x the sum of the individual GVCF files cohort_size = sum(gvcf.size for gvcf in gvcfs.values()) require(int(2.5 * cohort_size + genome_ref_size) < config.available_disk, 'There is not enough disk space to joint ' 'genotype samples:\n{}'.format('\n'.join(gvcfs.keys()))) job.fileStore.logToMaster('Merging cohort into a single GVCF file') return job.addChildJobFn(genotype_and_filter, gvcfs, config).rv()
[ "def", "joint_genotype_and_filter", "(", "job", ",", "gvcfs", ",", "config", ")", ":", "# Get the total size of genome reference files", "genome_ref_size", "=", "config", ".", "genome_fasta", ".", "size", "+", "config", ".", "genome_fai", ".", "size", "+", "config", ".", "genome_dict", ".", "size", "# Require at least 2.5x the sum of the individual GVCF files", "cohort_size", "=", "sum", "(", "gvcf", ".", "size", "for", "gvcf", "in", "gvcfs", ".", "values", "(", ")", ")", "require", "(", "int", "(", "2.5", "*", "cohort_size", "+", "genome_ref_size", ")", "<", "config", ".", "available_disk", ",", "'There is not enough disk space to joint '", "'genotype samples:\\n{}'", ".", "format", "(", "'\\n'", ".", "join", "(", "gvcfs", ".", "keys", "(", ")", ")", ")", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Merging cohort into a single GVCF file'", ")", "return", "job", ".", "addChildJobFn", "(", "genotype_and_filter", ",", "gvcfs", ",", "config", ")", ".", "rv", "(", ")" ]
Checks for enough disk space for joint genotyping, then calls the genotype and filter pipeline function. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.available_disk Total available disk space :returns: FileStoreID for the joint genotyped and filtered VCF file :rtype: str
[ "Checks", "for", "enough", "disk", "space", "for", "joint", "genotyping", "then", "calls", "the", "genotype", "and", "filter", "pipeline", "function", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L236-L262
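A small worked example of the disk check above, with invented sizes: forty GVCFs of roughly 1 GiB each plus a ~3 GiB reference need a little over 100 GiB of available disk.
GiB = 1024 ** 3
cohort_size = 40 * GiB                              # forty ~1 GiB GVCFs (made-up numbers)
genome_ref_size = 3 * GiB                           # fasta + fai + dict
needed = int(2.5 * cohort_size + genome_ref_size)
print(needed // GiB)                                # 103, so available_disk must exceed ~103 GiB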
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
genotype_and_filter
def genotype_and_filter(job, gvcfs, config): """ Genotypes one or more GVCF files and runs either the VQSR or hard filtering pipeline. Uploads the genotyped VCF file to the config output directory. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes config.unsafe_mode If True, then run GATK tools in UNSAFE mode :return: FileStoreID for genotyped and filtered VCF file :rtype: str """ # Get the total size of the genome reference genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # GenotypeGVCF disk requirement depends on the input GVCF, the genome reference files, and # the output VCF file. The output VCF is smaller than the input GVCF. genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size: 2 * sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size, gvcfs.values(), genome_ref_size) genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs, gvcfs, config.genome_fasta, config.genome_fai, config.genome_dict, annotations=config.annotations, unsafe_mode=config.unsafe_mode, cores=config.cores, disk=genotype_gvcf_disk, memory=config.xmx) # Determine if output GVCF has multiple samples if len(gvcfs) == 1: uuid = gvcfs.keys()[0] else: uuid = 'joint_genotyped' genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix) genotype_gvcf.addChildJobFn(output_file_job, genotyped_filename, genotype_gvcf.rv(), os.path.join(config.output_dir, uuid), s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, genotype_gvcf.rv())) if config.run_vqsr: if not config.joint_genotype: job.fileStore.logToMaster('WARNING: Running VQSR without joint genotyping.') joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline, uuid, genotype_gvcf.rv(), config) else: joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(hard_filter_pipeline, uuid, genotype_gvcf.rv(), config) return joint_genotype_vcf.rv()
python
def genotype_and_filter(job, gvcfs, config): """ Genotypes one or more GVCF files and runs either the VQSR or hard filtering pipeline. Uploads the genotyped VCF file to the config output directory. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes config.unsafe_mode If True, then run GATK tools in UNSAFE mode :return: FileStoreID for genotyped and filtered VCF file :rtype: str """ # Get the total size of the genome reference genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # GenotypeGVCF disk requirement depends on the input GVCF, the genome reference files, and # the output VCF file. The output VCF is smaller than the input GVCF. genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size: 2 * sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size, gvcfs.values(), genome_ref_size) genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs, gvcfs, config.genome_fasta, config.genome_fai, config.genome_dict, annotations=config.annotations, unsafe_mode=config.unsafe_mode, cores=config.cores, disk=genotype_gvcf_disk, memory=config.xmx) # Determine if output GVCF has multiple samples if len(gvcfs) == 1: uuid = gvcfs.keys()[0] else: uuid = 'joint_genotyped' genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix) genotype_gvcf.addChildJobFn(output_file_job, genotyped_filename, genotype_gvcf.rv(), os.path.join(config.output_dir, uuid), s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, genotype_gvcf.rv())) if config.run_vqsr: if not config.joint_genotype: job.fileStore.logToMaster('WARNING: Running VQSR without joint genotyping.') joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline, uuid, genotype_gvcf.rv(), config) else: joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(hard_filter_pipeline, uuid, genotype_gvcf.rv(), config) return joint_genotype_vcf.rv()
[ "def", "genotype_and_filter", "(", "job", ",", "gvcfs", ",", "config", ")", ":", "# Get the total size of the genome reference", "genome_ref_size", "=", "config", ".", "genome_fasta", ".", "size", "+", "config", ".", "genome_fai", ".", "size", "+", "config", ".", "genome_dict", ".", "size", "# GenotypeGVCF disk requirement depends on the input GVCF, the genome reference files, and", "# the output VCF file. The output VCF is smaller than the input GVCF.", "genotype_gvcf_disk", "=", "PromisedRequirement", "(", "lambda", "gvcf_ids", ",", "ref_size", ":", "2", "*", "sum", "(", "gvcf_", ".", "size", "for", "gvcf_", "in", "gvcf_ids", ")", "+", "ref_size", ",", "gvcfs", ".", "values", "(", ")", ",", "genome_ref_size", ")", "genotype_gvcf", "=", "job", ".", "addChildJobFn", "(", "gatk_genotype_gvcfs", ",", "gvcfs", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "annotations", "=", "config", ".", "annotations", ",", "unsafe_mode", "=", "config", ".", "unsafe_mode", ",", "cores", "=", "config", ".", "cores", ",", "disk", "=", "genotype_gvcf_disk", ",", "memory", "=", "config", ".", "xmx", ")", "# Determine if output GVCF has multiple samples", "if", "len", "(", "gvcfs", ")", "==", "1", ":", "uuid", "=", "gvcfs", ".", "keys", "(", ")", "[", "0", "]", "else", ":", "uuid", "=", "'joint_genotyped'", "genotyped_filename", "=", "'%s.genotyped%s.vcf'", "%", "(", "uuid", ",", "config", ".", "suffix", ")", "genotype_gvcf", ".", "addChildJobFn", "(", "output_file_job", ",", "genotyped_filename", ",", "genotype_gvcf", ".", "rv", "(", ")", ",", "os", ".", "path", ".", "join", "(", "config", ".", "output_dir", ",", "uuid", ")", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "PromisedRequirement", "(", "lambda", "x", ":", "x", ".", "size", ",", "genotype_gvcf", ".", "rv", "(", ")", ")", ")", "if", "config", ".", "run_vqsr", ":", "if", "not", "config", ".", "joint_genotype", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'WARNING: Running VQSR without joint genotyping.'", ")", "joint_genotype_vcf", "=", "genotype_gvcf", ".", "addFollowOnJobFn", "(", "vqsr_pipeline", ",", "uuid", ",", "genotype_gvcf", ".", "rv", "(", ")", ",", "config", ")", "else", ":", "joint_genotype_vcf", "=", "genotype_gvcf", ".", "addFollowOnJobFn", "(", "hard_filter_pipeline", ",", "uuid", ",", "genotype_gvcf", ".", "rv", "(", ")", ",", "config", ")", "return", "joint_genotype_vcf", ".", "rv", "(", ")" ]
Genotypes one or more GVCF files and runs either the VQSR or hard filtering pipeline. Uploads the genotyped VCF file to the config output directory. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes config.unsafe_mode If True, then run GATK tools in UNSAFE mode :return: FileStoreID for genotyped and filtered VCF file :rtype: str
[ "Genotypes", "one", "or", "more", "GVCF", "files", "and", "runs", "either", "the", "VQSR", "or", "hard", "filtering", "pipeline", ".", "Uploads", "the", "genotyped", "VCF", "file", "to", "the", "config", "output", "directory", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L265-L333
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
annotate_vcfs
def annotate_vcfs(job, vcfs, config): """ Runs Oncotator for a group of VCF files. Each sample is annotated individually. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict vcfs: Dictionary of VCF FileStoreIDs {Sample identifier: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.oncotator_db FileStoreID to Oncotator database config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes """ job.fileStore.logToMaster('Running Oncotator on the following samples:\n%s' % '\n'.join(vcfs.keys())) for uuid, vcf_id in vcfs.iteritems(): # The Oncotator disk requirement depends on the input VCF, the Oncotator database # and the output VCF. The annotated VCF will be significantly larger than the input VCF. onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db.size, vcf_id, config.oncotator_db) annotated_vcf = job.addChildJobFn(run_oncotator, vcf_id, config.oncotator_db, disk=onco_disk, cores=config.cores, memory=config.xmx) output_dir = os.path.join(config.output_dir, uuid) filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix) annotated_vcf.addChildJobFn(output_file_job, filename, annotated_vcf.rv(), output_dir, s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, annotated_vcf.rv()))
python
def annotate_vcfs(job, vcfs, config): """ Runs Oncotator for a group of VCF files. Each sample is annotated individually. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict vcfs: Dictionary of VCF FileStoreIDs {Sample identifier: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.oncotator_db FileStoreID to Oncotator database config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes """ job.fileStore.logToMaster('Running Oncotator on the following samples:\n%s' % '\n'.join(vcfs.keys())) for uuid, vcf_id in vcfs.iteritems(): # The Oncotator disk requirement depends on the input VCF, the Oncotator database # and the output VCF. The annotated VCF will be significantly larger than the input VCF. onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db.size, vcf_id, config.oncotator_db) annotated_vcf = job.addChildJobFn(run_oncotator, vcf_id, config.oncotator_db, disk=onco_disk, cores=config.cores, memory=config.xmx) output_dir = os.path.join(config.output_dir, uuid) filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix) annotated_vcf.addChildJobFn(output_file_job, filename, annotated_vcf.rv(), output_dir, s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, annotated_vcf.rv()))
[ "def", "annotate_vcfs", "(", "job", ",", "vcfs", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Running Oncotator on the following samples:\\n%s'", "%", "'\\n'", ".", "join", "(", "vcfs", ".", "keys", "(", ")", ")", ")", "for", "uuid", ",", "vcf_id", "in", "vcfs", ".", "iteritems", "(", ")", ":", "# The Oncotator disk requirement depends on the input VCF, the Oncotator database", "# and the output VCF. The annotated VCF will be significantly larger than the input VCF.", "onco_disk", "=", "PromisedRequirement", "(", "lambda", "vcf", ",", "db", ":", "3", "*", "vcf", ".", "size", "+", "db", ".", "size", ",", "vcf_id", ",", "config", ".", "oncotator_db", ")", "annotated_vcf", "=", "job", ".", "addChildJobFn", "(", "run_oncotator", ",", "vcf_id", ",", "config", ".", "oncotator_db", ",", "disk", "=", "onco_disk", ",", "cores", "=", "config", ".", "cores", ",", "memory", "=", "config", ".", "xmx", ")", "output_dir", "=", "os", ".", "path", ".", "join", "(", "config", ".", "output_dir", ",", "uuid", ")", "filename", "=", "'{}.oncotator{}.vcf'", ".", "format", "(", "uuid", ",", "config", ".", "suffix", ")", "annotated_vcf", ".", "addChildJobFn", "(", "output_file_job", ",", "filename", ",", "annotated_vcf", ".", "rv", "(", ")", ",", "output_dir", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "PromisedRequirement", "(", "lambda", "x", ":", "x", ".", "size", ",", "annotated_vcf", ".", "rv", "(", ")", ")", ")" ]
Runs Oncotator for a group of VCF files. Each sample is annotated individually. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict vcfs: Dictionary of VCF FileStoreIDs {Sample identifier: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.oncotator_db FileStoreID to Oncotator database config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes
[ "Runs", "Oncotator", "for", "a", "group", "of", "VCF", "files", ".", "Each", "sample", "is", "annotated", "individually", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L336-L373
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
parse_manifest
def parse_manifest(path_to_manifest): """ Parses manifest file for Toil Germline Pipeline :param str path_to_manifest: Path to sample manifest file :return: List of GermlineSample namedtuples :rtype: list[GermlineSample] """ bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)" fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)" samples = [] with open(path_to_manifest, 'r') as f: for line in f.readlines(): line = line.strip() if line.startswith('#'): continue bam_match = re.match(bam_re, line) fastq_match = re.match(fq_re, line) if bam_match: uuid = bam_match.group('uuid') url = bam_match.group('url') paired_url = None rg_line = None require('.bam' in url.lower(), 'Expected .bam extension:\n{}:\t{}'.format(uuid, url)) elif fastq_match: uuid = fastq_match.group('uuid') url = fastq_match.group('url') paired_url = fastq_match.group('paired_url') rg_line = fastq_match.group('rg_line') require('.fq' in url.lower() or '.fastq' in url.lower(), 'Expected .fq extension:\n{}:\t{}'.format(uuid, url)) else: raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line)) # Checks that URL has a scheme require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url)) samples.append(GermlineSample(uuid, url, paired_url, rg_line)) return samples
python
def parse_manifest(path_to_manifest): """ Parses manifest file for Toil Germline Pipeline :param str path_to_manifest: Path to sample manifest file :return: List of GermlineSample namedtuples :rtype: list[GermlineSample] """ bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)" fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)" samples = [] with open(path_to_manifest, 'r') as f: for line in f.readlines(): line = line.strip() if line.startswith('#'): continue bam_match = re.match(bam_re, line) fastq_match = re.match(fq_re, line) if bam_match: uuid = bam_match.group('uuid') url = bam_match.group('url') paired_url = None rg_line = None require('.bam' in url.lower(), 'Expected .bam extension:\n{}:\t{}'.format(uuid, url)) elif fastq_match: uuid = fastq_match.group('uuid') url = fastq_match.group('url') paired_url = fastq_match.group('paired_url') rg_line = fastq_match.group('rg_line') require('.fq' in url.lower() or '.fastq' in url.lower(), 'Expected .fq extension:\n{}:\t{}'.format(uuid, url)) else: raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line)) # Checks that URL has a scheme require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url)) samples.append(GermlineSample(uuid, url, paired_url, rg_line)) return samples
[ "def", "parse_manifest", "(", "path_to_manifest", ")", ":", "bam_re", "=", "r\"^(?P<uuid>\\S+)\\s(?P<url>\\S+[bsc][r]?am)\"", "fq_re", "=", "r\"^(?P<uuid>\\S+)\\s(?P<url>\\S+)\\s(?P<paired_url>\\S+)?\\s?(?P<rg_line>@RG\\S+)\"", "samples", "=", "[", "]", "with", "open", "(", "path_to_manifest", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "bam_match", "=", "re", ".", "match", "(", "bam_re", ",", "line", ")", "fastq_match", "=", "re", ".", "match", "(", "fq_re", ",", "line", ")", "if", "bam_match", ":", "uuid", "=", "bam_match", ".", "group", "(", "'uuid'", ")", "url", "=", "bam_match", ".", "group", "(", "'url'", ")", "paired_url", "=", "None", "rg_line", "=", "None", "require", "(", "'.bam'", "in", "url", ".", "lower", "(", ")", ",", "'Expected .bam extension:\\n{}:\\t{}'", ".", "format", "(", "uuid", ",", "url", ")", ")", "elif", "fastq_match", ":", "uuid", "=", "fastq_match", ".", "group", "(", "'uuid'", ")", "url", "=", "fastq_match", ".", "group", "(", "'url'", ")", "paired_url", "=", "fastq_match", ".", "group", "(", "'paired_url'", ")", "rg_line", "=", "fastq_match", ".", "group", "(", "'rg_line'", ")", "require", "(", "'.fq'", "in", "url", ".", "lower", "(", ")", "or", "'.fastq'", "in", "url", ".", "lower", "(", ")", ",", "'Expected .fq extension:\\n{}:\\t{}'", ".", "format", "(", "uuid", ",", "url", ")", ")", "else", ":", "raise", "ValueError", "(", "'Could not parse entry in manifest: %s\\n%s'", "%", "(", "f", ".", "name", ",", "line", ")", ")", "# Checks that URL has a scheme", "require", "(", "urlparse", "(", "url", ")", ".", "scheme", ",", "'Invalid URL passed for {}'", ".", "format", "(", "url", ")", ")", "samples", ".", "append", "(", "GermlineSample", "(", "uuid", ",", "url", ",", "paired_url", ",", "rg_line", ")", ")", "return", "samples" ]
Parses manifest file for Toil Germline Pipeline :param str path_to_manifest: Path to sample manifest file :return: List of GermlineSample namedtuples :rtype: list[GermlineSample]
[ "Parses", "manifest", "file", "for", "Toil", "Germline", "Pipeline" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L379-L416
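A hypothetical manifest that the regexes above accept: one BAM entry and one paired-FASTQ entry with an @RG line. The uuids, URLs, and file name are made up. The @RG field has to be a single whitespace-free token, so the tabs inside it are written as the literal two characters backslash-t; also, because the BAM pattern is tried first and only needs a [bsc][r]?am substring in the URL, the FASTQ paths below avoid names like "sample" that would be routed to the BAM branch.
# assumes parse_manifest (above) is importable from the module recorded above
from toil_scripts.gatk_germline.germline import parse_manifest

example = (
    "# comment lines are skipped\n"
    "patient-1\ts3://example-bucket/patient-1.bam\n"
    "patient-2\ts3://example-bucket/reads_1.fq\t"
    "s3://example-bucket/reads_2.fq\t@RG\\tID:patient-2\\tSM:patient-2\n"
)
with open('manifest-germline.tsv', 'w') as f:   # hypothetical file name
    f.write(example)
samples = parse_manifest('manifest-germline.tsv')
# -> [GermlineSample(uuid='patient-1', url='...patient-1.bam', paired_url=None, rg_line=None),
#     GermlineSample(uuid='patient-2', url='...reads_1.fq', paired_url='...reads_2.fq',
#                    rg_line='@RG\\tID:patient-2\\tSM:patient-2')]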
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
download_shared_files
def download_shared_files(job, config): """ Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared fileStoreIDS :rtype: Namespace """ job.fileStore.logToMaster('Downloading shared reference files') shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'} nonessential_files = {'genome_fai', 'genome_dict'} # Download necessary files for pipeline configuration if config.run_bwa: shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'} nonessential_files.add('alt') if config.preprocess: shared_files |= {'g1k_indel', 'mills', 'dbsnp'} if config.run_vqsr: shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} if config.run_oncotator: shared_files.add('oncotator_db') for name in shared_files: try: url = getattr(config, name, None) if url is None: continue setattr(config, name, job.addChildJobFn(download_url_job, url, name=name, s3_key_path=config.ssec, disk='15G' # Estimated reference file size ).rv()) finally: if getattr(config, name, None) is None and name not in nonessential_files: raise ValueError("Necessary configuration parameter is missing:\n{}".format(name)) return job.addFollowOnJobFn(reference_preprocessing, config).rv()
python
def download_shared_files(job, config): """ Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared fileStoreIDS :rtype: Namespace """ job.fileStore.logToMaster('Downloading shared reference files') shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'} nonessential_files = {'genome_fai', 'genome_dict'} # Download necessary files for pipeline configuration if config.run_bwa: shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'} nonessential_files.add('alt') if config.preprocess: shared_files |= {'g1k_indel', 'mills', 'dbsnp'} if config.run_vqsr: shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} if config.run_oncotator: shared_files.add('oncotator_db') for name in shared_files: try: url = getattr(config, name, None) if url is None: continue setattr(config, name, job.addChildJobFn(download_url_job, url, name=name, s3_key_path=config.ssec, disk='15G' # Estimated reference file size ).rv()) finally: if getattr(config, name, None) is None and name not in nonessential_files: raise ValueError("Necessary configuration parameter is missing:\n{}".format(name)) return job.addFollowOnJobFn(reference_preprocessing, config).rv()
[ "def", "download_shared_files", "(", "job", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Downloading shared reference files'", ")", "shared_files", "=", "{", "'genome_fasta'", ",", "'genome_fai'", ",", "'genome_dict'", "}", "nonessential_files", "=", "{", "'genome_fai'", ",", "'genome_dict'", "}", "# Download necessary files for pipeline configuration", "if", "config", ".", "run_bwa", ":", "shared_files", "|=", "{", "'amb'", ",", "'ann'", ",", "'bwt'", ",", "'pac'", ",", "'sa'", ",", "'alt'", "}", "nonessential_files", ".", "add", "(", "'alt'", ")", "if", "config", ".", "preprocess", ":", "shared_files", "|=", "{", "'g1k_indel'", ",", "'mills'", ",", "'dbsnp'", "}", "if", "config", ".", "run_vqsr", ":", "shared_files", "|=", "{", "'g1k_snp'", ",", "'mills'", ",", "'dbsnp'", ",", "'hapmap'", ",", "'omni'", "}", "if", "config", ".", "run_oncotator", ":", "shared_files", ".", "add", "(", "'oncotator_db'", ")", "for", "name", "in", "shared_files", ":", "try", ":", "url", "=", "getattr", "(", "config", ",", "name", ",", "None", ")", "if", "url", "is", "None", ":", "continue", "setattr", "(", "config", ",", "name", ",", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "url", ",", "name", "=", "name", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "'15G'", "# Estimated reference file size", ")", ".", "rv", "(", ")", ")", "finally", ":", "if", "getattr", "(", "config", ",", "name", ",", "None", ")", "is", "None", "and", "name", "not", "in", "nonessential_files", ":", "raise", "ValueError", "(", "\"Necessary configuration parameter is missing:\\n{}\"", ".", "format", "(", "name", ")", ")", "return", "job", ".", "addFollowOnJobFn", "(", "reference_preprocessing", ",", "config", ")", ".", "rv", "(", ")" ]
Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared FileStoreIDs :rtype: Namespace
[ "Downloads", "shared", "reference", "files", "for", "Toil", "Germline", "pipeline" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L419-L456
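As a minimal illustration of which attributes this job actually touches when every optional stage is switched off: only genome_fasta must be set to a URL, while genome_fai and genome_dict may be left as None and are rebuilt by reference_preprocessing below. The URL and resource values are placeholders.
from argparse import Namespace
from toil.job import Job
from toil_scripts.gatk_germline.germline import download_shared_files  # path from the record above

config = Namespace(run_bwa=False, preprocess=False, run_vqsr=False, run_oncotator=False,
                   ssec=None, cores=4, xmx='10G',
                   genome_fasta='s3://example-bucket/GRCh37.fa',  # placeholder reference URL
                   genome_fai=None, genome_dict=None)             # regenerated downstream when unset
shared = Job.wrapJobFn(download_shared_files, config)  # the updated config comes back via shared.rv()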
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
reference_preprocessing
def reference_preprocessing(job, config): """ Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options and shared files. Requires FileStoreID for genome fasta file as config.genome_fasta :return: Updated config with reference index files :rtype: Namespace """ job.fileStore.logToMaster('Preparing Reference Files') genome_id = config.genome_fasta if getattr(config, 'genome_fai', None) is None: config.genome_fai = job.addChildJobFn(run_samtools_faidx, genome_id, cores=config.cores).rv() if getattr(config, 'genome_dict', None) is None: config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary, genome_id, cores=config.cores, memory=config.xmx).rv() return config
python
def reference_preprocessing(job, config): """ Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options and shared files. Requires FileStoreID for genome fasta file as config.genome_fasta :return: Updated config with reference index files :rtype: Namespace """ job.fileStore.logToMaster('Preparing Reference Files') genome_id = config.genome_fasta if getattr(config, 'genome_fai', None) is None: config.genome_fai = job.addChildJobFn(run_samtools_faidx, genome_id, cores=config.cores).rv() if getattr(config, 'genome_dict', None) is None: config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary, genome_id, cores=config.cores, memory=config.xmx).rv() return config
[ "def", "reference_preprocessing", "(", "job", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Preparing Reference Files'", ")", "genome_id", "=", "config", ".", "genome_fasta", "if", "getattr", "(", "config", ",", "'genome_fai'", ",", "None", ")", "is", "None", ":", "config", ".", "genome_fai", "=", "job", ".", "addChildJobFn", "(", "run_samtools_faidx", ",", "genome_id", ",", "cores", "=", "config", ".", "cores", ")", ".", "rv", "(", ")", "if", "getattr", "(", "config", ",", "'genome_dict'", ",", "None", ")", "is", "None", ":", "config", ".", "genome_dict", "=", "job", ".", "addChildJobFn", "(", "run_picard_create_sequence_dictionary", ",", "genome_id", ",", "cores", "=", "config", ".", "cores", ",", "memory", "=", "config", ".", "xmx", ")", ".", "rv", "(", ")", "return", "config" ]
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options and shared files. Requires FileStoreID for genome fasta file as config.genome_fasta :return: Updated config with reference index files :rtype: Namespace
[ "Creates", "a", "genome", "fasta", "index", "and", "sequence", "dictionary", "file", "if", "not", "already", "present", "in", "the", "pipeline", "config", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L459-L480
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
prepare_bam
def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None): """ Prepares BAM file for Toil germline pipeline. Steps in pipeline 0: Download and align BAM or FASTQ sample 1: Sort BAM 2: Index BAM 3: Run GATK preprocessing pipeline (Optional) - Uploads preprocessed BAM to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique identifier for the sample :param str url: URL or local path to BAM file or FASTQs :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.g1k_indel FileStoreID for 1000G INDEL resource file config.mills FileStoreID for Mills resource file config.dbsnp FileStoreID for dbSNP resource file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes :param str|None paired_url: URL or local path to paired FASTQ file, default is None :param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None :return: BAM and BAI FileStoreIDs :rtype: tuple """ # 0: Align FASTQ or realign BAM if config.run_bwa: get_bam = job.wrapJobFn(setup_and_run_bwakit, uuid, url, rg_line, config, paired_url=paired_url).encapsulate() # 0: Download BAM elif '.bam' in url.lower(): job.fileStore.logToMaster("Downloading BAM: %s" % uuid) get_bam = job.wrapJobFn(download_url_job, url, name='toil.bam', s3_key_path=config.ssec, disk=config.file_size).encapsulate() else: raise ValueError('Could not generate BAM file for %s\n' 'Provide a FASTQ URL and set run-bwa or ' 'provide a BAM URL that includes .bam extension.' % uuid) # 1: Sort BAM file if necessary # Realigning BAM file shuffles read order if config.sorted and not config.run_bwa: sorted_bam = get_bam else: # The samtools sort disk requirement depends on the input bam, the tmp files, and the # sorted output bam. sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv()) sorted_bam = get_bam.addChildJobFn(run_samtools_sort, get_bam.rv(), cores=config.cores, disk=sorted_bam_disk) # 2: Index BAM # The samtools index disk requirement depends on the input bam and the output bam index index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv()) index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk) job.addChild(get_bam) sorted_bam.addChild(index_bam) if config.preprocess: preprocess = job.wrapJobFn(run_gatk_preprocessing, sorted_bam.rv(), index_bam.rv(), config.genome_fasta, config.genome_dict, config.genome_fai, config.g1k_indel, config.mills, config.dbsnp, memory=config.xmx, cores=config.cores).encapsulate() sorted_bam.addChild(preprocess) index_bam.addChild(preprocess) # Update output BAM promises output_bam_promise = preprocess.rv(0) output_bai_promise = preprocess.rv(1) # Save processed BAM output_dir = os.path.join(config.output_dir, uuid) filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix) output_bam = job.wrapJobFn(output_file_job, filename, preprocess.rv(0), output_dir, s3_key_path=config.ssec) preprocess.addChild(output_bam) else: output_bam_promise = sorted_bam.rv() output_bai_promise = index_bam.rv() return output_bam_promise, output_bai_promise
python
def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None): """ Prepares BAM file for Toil germline pipeline. Steps in pipeline 0: Download and align BAM or FASTQ sample 1: Sort BAM 2: Index BAM 3: Run GATK preprocessing pipeline (Optional) - Uploads preprocessed BAM to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique identifier for the sample :param str url: URL or local path to BAM file or FASTQs :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.g1k_indel FileStoreID for 1000G INDEL resource file config.mills FileStoreID for Mills resource file config.dbsnp FileStoreID for dbSNP resource file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes :param str|None paired_url: URL or local path to paired FASTQ file, default is None :param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None :return: BAM and BAI FileStoreIDs :rtype: tuple """ # 0: Align FASTQ or realign BAM if config.run_bwa: get_bam = job.wrapJobFn(setup_and_run_bwakit, uuid, url, rg_line, config, paired_url=paired_url).encapsulate() # 0: Download BAM elif '.bam' in url.lower(): job.fileStore.logToMaster("Downloading BAM: %s" % uuid) get_bam = job.wrapJobFn(download_url_job, url, name='toil.bam', s3_key_path=config.ssec, disk=config.file_size).encapsulate() else: raise ValueError('Could not generate BAM file for %s\n' 'Provide a FASTQ URL and set run-bwa or ' 'provide a BAM URL that includes .bam extension.' % uuid) # 1: Sort BAM file if necessary # Realigning BAM file shuffles read order if config.sorted and not config.run_bwa: sorted_bam = get_bam else: # The samtools sort disk requirement depends on the input bam, the tmp files, and the # sorted output bam. sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv()) sorted_bam = get_bam.addChildJobFn(run_samtools_sort, get_bam.rv(), cores=config.cores, disk=sorted_bam_disk) # 2: Index BAM # The samtools index disk requirement depends on the input bam and the output bam index index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv()) index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk) job.addChild(get_bam) sorted_bam.addChild(index_bam) if config.preprocess: preprocess = job.wrapJobFn(run_gatk_preprocessing, sorted_bam.rv(), index_bam.rv(), config.genome_fasta, config.genome_dict, config.genome_fai, config.g1k_indel, config.mills, config.dbsnp, memory=config.xmx, cores=config.cores).encapsulate() sorted_bam.addChild(preprocess) index_bam.addChild(preprocess) # Update output BAM promises output_bam_promise = preprocess.rv(0) output_bai_promise = preprocess.rv(1) # Save processed BAM output_dir = os.path.join(config.output_dir, uuid) filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix) output_bam = job.wrapJobFn(output_file_job, filename, preprocess.rv(0), output_dir, s3_key_path=config.ssec) preprocess.addChild(output_bam) else: output_bam_promise = sorted_bam.rv() output_bai_promise = index_bam.rv() return output_bam_promise, output_bai_promise
[ "def", "prepare_bam", "(", "job", ",", "uuid", ",", "url", ",", "config", ",", "paired_url", "=", "None", ",", "rg_line", "=", "None", ")", ":", "# 0: Align FASTQ or realign BAM", "if", "config", ".", "run_bwa", ":", "get_bam", "=", "job", ".", "wrapJobFn", "(", "setup_and_run_bwakit", ",", "uuid", ",", "url", ",", "rg_line", ",", "config", ",", "paired_url", "=", "paired_url", ")", ".", "encapsulate", "(", ")", "# 0: Download BAM", "elif", "'.bam'", "in", "url", ".", "lower", "(", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "\"Downloading BAM: %s\"", "%", "uuid", ")", "get_bam", "=", "job", ".", "wrapJobFn", "(", "download_url_job", ",", "url", ",", "name", "=", "'toil.bam'", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "config", ".", "file_size", ")", ".", "encapsulate", "(", ")", "else", ":", "raise", "ValueError", "(", "'Could not generate BAM file for %s\\n'", "'Provide a FASTQ URL and set run-bwa or '", "'provide a BAM URL that includes .bam extension.'", "%", "uuid", ")", "# 1: Sort BAM file if necessary", "# Realigning BAM file shuffles read order", "if", "config", ".", "sorted", "and", "not", "config", ".", "run_bwa", ":", "sorted_bam", "=", "get_bam", "else", ":", "# The samtools sort disk requirement depends on the input bam, the tmp files, and the", "# sorted output bam.", "sorted_bam_disk", "=", "PromisedRequirement", "(", "lambda", "bam", ":", "3", "*", "bam", ".", "size", ",", "get_bam", ".", "rv", "(", ")", ")", "sorted_bam", "=", "get_bam", ".", "addChildJobFn", "(", "run_samtools_sort", ",", "get_bam", ".", "rv", "(", ")", ",", "cores", "=", "config", ".", "cores", ",", "disk", "=", "sorted_bam_disk", ")", "# 2: Index BAM", "# The samtools index disk requirement depends on the input bam and the output bam index", "index_bam_disk", "=", "PromisedRequirement", "(", "lambda", "bam", ":", "bam", ".", "size", ",", "sorted_bam", ".", "rv", "(", ")", ")", "index_bam", "=", "job", ".", "wrapJobFn", "(", "run_samtools_index", ",", "sorted_bam", ".", "rv", "(", ")", ",", "disk", "=", "index_bam_disk", ")", "job", ".", "addChild", "(", "get_bam", ")", "sorted_bam", ".", "addChild", "(", "index_bam", ")", "if", "config", ".", "preprocess", ":", "preprocess", "=", "job", ".", "wrapJobFn", "(", "run_gatk_preprocessing", ",", "sorted_bam", ".", "rv", "(", ")", ",", "index_bam", ".", "rv", "(", ")", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_dict", ",", "config", ".", "genome_fai", ",", "config", ".", "g1k_indel", ",", "config", ".", "mills", ",", "config", ".", "dbsnp", ",", "memory", "=", "config", ".", "xmx", ",", "cores", "=", "config", ".", "cores", ")", ".", "encapsulate", "(", ")", "sorted_bam", ".", "addChild", "(", "preprocess", ")", "index_bam", ".", "addChild", "(", "preprocess", ")", "# Update output BAM promises", "output_bam_promise", "=", "preprocess", ".", "rv", "(", "0", ")", "output_bai_promise", "=", "preprocess", ".", "rv", "(", "1", ")", "# Save processed BAM", "output_dir", "=", "os", ".", "path", ".", "join", "(", "config", ".", "output_dir", ",", "uuid", ")", "filename", "=", "'{}.preprocessed{}.bam'", ".", "format", "(", "uuid", ",", "config", ".", "suffix", ")", "output_bam", "=", "job", ".", "wrapJobFn", "(", "output_file_job", ",", "filename", ",", "preprocess", ".", "rv", "(", "0", ")", ",", "output_dir", ",", "s3_key_path", "=", "config", ".", "ssec", ")", "preprocess", ".", "addChild", "(", "output_bam", ")", "else", ":", "output_bam_promise", "=", "sorted_bam", ".", "rv", "(", ")", 
"output_bai_promise", "=", "index_bam", ".", "rv", "(", ")", "return", "output_bam_promise", ",", "output_bai_promise" ]
Prepares BAM file for Toil germline pipeline. Steps in pipeline 0: Download and align BAM or FASTQ sample 1: Sort BAM 2: Index BAM 3: Run GATK preprocessing pipeline (Optional) - Uploads preprocessed BAM to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique identifier for the sample :param str url: URL or local path to BAM file or FASTQs :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.g1k_indel FileStoreID for 1000G INDEL resource file config.mills FileStoreID for Mills resource file config.dbsnp FileStoreID for dbSNP resource file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes :param str|None paired_url: URL or local path to paired FASTQ file, default is None :param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None :return: BAM and BAI FileStoreIDs :rtype: tuple
[ "Prepares", "BAM", "file", "for", "Toil", "germline", "pipeline", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L483-L592
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
setup_and_run_bwakit
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None): """ Downloads and runs bwakit for BAM or FASTQ files :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension. :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.cores Number of cores for each job config.trim If True, trim adapters using bwakit config.amb FileStoreID for BWA index file prefix.amb config.ann FileStoreID for BWA index file prefix.ann config.bwt FileStoreID for BWA index file prefix.bwt config.pac FileStoreID for BWA index file prefix.pac config.sa FileStoreID for BWA index file prefix.sa config.alt FileStoreID for alternate contigs file or None :param str|None paired_url: URL to paired FASTQ :param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar) :return: BAM FileStoreID :rtype: str """ bwa_config = deepcopy(config) bwa_config.uuid = uuid bwa_config.rg_line = rg_line # bwa_alignment uses a different naming convention bwa_config.ref = config.genome_fasta bwa_config.fai = config.genome_fai # Determine if sample is a FASTQ or BAM file using the file extension basename, ext = os.path.splitext(url) ext = ext.lower() if ext == '.gz': _, ext = os.path.splitext(basename) ext = ext.lower() # The pipeline currently supports FASTQ and BAM files require(ext in ['.fq', '.fastq', '.bam'], 'Please use .fq or .bam file extensions:\n%s' % url) # Download fastq files samples = [] input1 = job.addChildJobFn(download_url_job, url, name='file1', s3_key_path=config.ssec, disk=config.file_size) samples.append(input1.rv()) # If the extension is for a BAM file, then configure bwakit to realign the BAM file. if ext == '.bam': bwa_config.bam = input1.rv() else: bwa_config.r1 = input1.rv() # Download the paired FASTQ URL if paired_url: input2 = job.addChildJobFn(download_url_job, paired_url, name='file2', s3_key_path=config.ssec, disk=config.file_size) samples.append(input2.rv()) bwa_config.r2 = input2.rv() # The bwakit disk requirement depends on the size of the input files and the index # Take the sum of the input files and scale it by a factor of 4 bwa_index_size = sum([getattr(config, index_file).size for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt'] if getattr(config, index_file, None) is not None]) bwakit_disk = PromisedRequirement(lambda lst, index_size: int(4 * sum(x.size for x in lst) + index_size), samples, bwa_index_size) return job.addFollowOnJobFn(run_bwakit, bwa_config, sort=False, # BAM files are sorted later in the pipeline trim=config.trim, cores=config.cores, disk=bwakit_disk).rv()
python
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None): """ Downloads and runs bwakit for BAM or FASTQ files :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension. :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.cores Number of cores for each job config.trim If True, trim adapters using bwakit config.amb FileStoreID for BWA index file prefix.amb config.ann FileStoreID for BWA index file prefix.ann config.bwt FileStoreID for BWA index file prefix.bwt config.pac FileStoreID for BWA index file prefix.pac config.sa FileStoreID for BWA index file prefix.sa config.alt FileStoreID for alternate contigs file or None :param str|None paired_url: URL to paired FASTQ :param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar) :return: BAM FileStoreID :rtype: str """ bwa_config = deepcopy(config) bwa_config.uuid = uuid bwa_config.rg_line = rg_line # bwa_alignment uses a different naming convention bwa_config.ref = config.genome_fasta bwa_config.fai = config.genome_fai # Determine if sample is a FASTQ or BAM file using the file extension basename, ext = os.path.splitext(url) ext = ext.lower() if ext == '.gz': _, ext = os.path.splitext(basename) ext = ext.lower() # The pipeline currently supports FASTQ and BAM files require(ext in ['.fq', '.fastq', '.bam'], 'Please use .fq or .bam file extensions:\n%s' % url) # Download fastq files samples = [] input1 = job.addChildJobFn(download_url_job, url, name='file1', s3_key_path=config.ssec, disk=config.file_size) samples.append(input1.rv()) # If the extension is for a BAM file, then configure bwakit to realign the BAM file. if ext == '.bam': bwa_config.bam = input1.rv() else: bwa_config.r1 = input1.rv() # Download the paired FASTQ URL if paired_url: input2 = job.addChildJobFn(download_url_job, paired_url, name='file2', s3_key_path=config.ssec, disk=config.file_size) samples.append(input2.rv()) bwa_config.r2 = input2.rv() # The bwakit disk requirement depends on the size of the input files and the index # Take the sum of the input files and scale it by a factor of 4 bwa_index_size = sum([getattr(config, index_file).size for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt'] if getattr(config, index_file, None) is not None]) bwakit_disk = PromisedRequirement(lambda lst, index_size: int(4 * sum(x.size for x in lst) + index_size), samples, bwa_index_size) return job.addFollowOnJobFn(run_bwakit, bwa_config, sort=False, # BAM files are sorted later in the pipeline trim=config.trim, cores=config.cores, disk=bwakit_disk).rv()
[ "def", "setup_and_run_bwakit", "(", "job", ",", "uuid", ",", "url", ",", "rg_line", ",", "config", ",", "paired_url", "=", "None", ")", ":", "bwa_config", "=", "deepcopy", "(", "config", ")", "bwa_config", ".", "uuid", "=", "uuid", "bwa_config", ".", "rg_line", "=", "rg_line", "# bwa_alignment uses a different naming convention", "bwa_config", ".", "ref", "=", "config", ".", "genome_fasta", "bwa_config", ".", "fai", "=", "config", ".", "genome_fai", "# Determine if sample is a FASTQ or BAM file using the file extension", "basename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "url", ")", "ext", "=", "ext", ".", "lower", "(", ")", "if", "ext", "==", "'.gz'", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "ext", "=", "ext", ".", "lower", "(", ")", "# The pipeline currently supports FASTQ and BAM files", "require", "(", "ext", "in", "[", "'.fq'", ",", "'.fastq'", ",", "'.bam'", "]", ",", "'Please use .fq or .bam file extensions:\\n%s'", "%", "url", ")", "# Download fastq files", "samples", "=", "[", "]", "input1", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "url", ",", "name", "=", "'file1'", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "config", ".", "file_size", ")", "samples", ".", "append", "(", "input1", ".", "rv", "(", ")", ")", "# If the extension is for a BAM file, then configure bwakit to realign the BAM file.", "if", "ext", "==", "'.bam'", ":", "bwa_config", ".", "bam", "=", "input1", ".", "rv", "(", ")", "else", ":", "bwa_config", ".", "r1", "=", "input1", ".", "rv", "(", ")", "# Download the paired FASTQ URL", "if", "paired_url", ":", "input2", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "paired_url", ",", "name", "=", "'file2'", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "config", ".", "file_size", ")", "samples", ".", "append", "(", "input2", ".", "rv", "(", ")", ")", "bwa_config", ".", "r2", "=", "input2", ".", "rv", "(", ")", "# The bwakit disk requirement depends on the size of the input files and the index", "# Take the sum of the input files and scale it by a factor of 4", "bwa_index_size", "=", "sum", "(", "[", "getattr", "(", "config", ",", "index_file", ")", ".", "size", "for", "index_file", "in", "[", "'amb'", ",", "'ann'", ",", "'bwt'", ",", "'pac'", ",", "'sa'", ",", "'alt'", "]", "if", "getattr", "(", "config", ",", "index_file", ",", "None", ")", "is", "not", "None", "]", ")", "bwakit_disk", "=", "PromisedRequirement", "(", "lambda", "lst", ",", "index_size", ":", "int", "(", "4", "*", "sum", "(", "x", ".", "size", "for", "x", "in", "lst", ")", "+", "index_size", ")", ",", "samples", ",", "bwa_index_size", ")", "return", "job", ".", "addFollowOnJobFn", "(", "run_bwakit", ",", "bwa_config", ",", "sort", "=", "False", ",", "# BAM files are sorted later in the pipeline", "trim", "=", "config", ".", "trim", ",", "cores", "=", "config", ".", "cores", ",", "disk", "=", "bwakit_disk", ")", ".", "rv", "(", ")" ]
Downloads and runs bwakit for BAM or FASTQ files :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension. :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.cores Number of cores for each job config.trim If True, trim adapters using bwakit config.amb FileStoreID for BWA index file prefix.amb config.ann FileStoreID for BWA index file prefix.ann config.bwt FileStoreID for BWA index file prefix.bwt config.pac FileStoreID for BWA index file prefix.pac config.sa FileStoreID for BWA index file prefix.sa config.alt FileStoreID for alternate contigs file or None :param str|None paired_url: URL to paired FASTQ :param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar) :return: BAM FileStoreID :rtype: str
[ "Downloads", "and", "runs", "bwakit", "for", "BAM", "or", "FASTQ", "files" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L595-L680
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
gatk_haplotype_caller
def gatk_haplotype_caller(job, bam, bai, ref, fai, ref_dict, annotations=None, emit_threshold=10.0, call_threshold=30.0, unsafe_mode=False, hc_output=None): """ Uses GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file. :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param str bai: FileStoreID for BAM index file :param str ref: FileStoreID for reference genome fasta file :param str ref_dict: FileStoreID for reference sequence dictionary file :param str fai: FileStoreID for reference fasta index file :param list[str] annotations: List of GATK variant annotations, default is None :param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0 :param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0 :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :param str hc_output: URL or local path to pre-cooked VCF file, default is None :return: FileStoreID for GVCF file :rtype: str """ job.fileStore.logToMaster('Running GATK HaplotypeCaller') inputs = {'genome.fa': ref, 'genome.fa.fai': fai, 'genome.dict': ref_dict, 'input.bam': bam, 'input.bam.bai': bai} work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) # Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file: # https://software.broadinstitute.org/gatk/documentation/article?id=2803 command = ['-T', 'HaplotypeCaller', '-nct', str(job.cores), '-R', 'genome.fa', '-I', 'input.bam', '-o', 'output.g.vcf', '-stand_call_conf', str(call_threshold), '-stand_emit_conf', str(emit_threshold), '-variant_index_type', 'LINEAR', '-variant_index_parameter', '128000', '--genotyping_mode', 'Discovery', '--emitRefConfidence', 'GVCF'] if unsafe_mode: command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command if annotations: for annotation in annotations: command.extend(['-A', annotation]) # Uses docker_call mock mode to replace output with hc_output file outputs = {'output.g.vcf': hc_output} docker_call(job=job, work_dir=work_dir, env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)}, parameters=command, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2', inputs=inputs.keys(), outputs=outputs, mock=True if outputs['output.g.vcf'] else False) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf'))
python
def gatk_haplotype_caller(job, bam, bai, ref, fai, ref_dict, annotations=None, emit_threshold=10.0, call_threshold=30.0, unsafe_mode=False, hc_output=None): """ Uses GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file. :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param str bai: FileStoreID for BAM index file :param str ref: FileStoreID for reference genome fasta file :param str ref_dict: FileStoreID for reference sequence dictionary file :param str fai: FileStoreID for reference fasta index file :param list[str] annotations: List of GATK variant annotations, default is None :param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0 :param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0 :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :param str hc_output: URL or local path to pre-cooked VCF file, default is None :return: FileStoreID for GVCF file :rtype: str """ job.fileStore.logToMaster('Running GATK HaplotypeCaller') inputs = {'genome.fa': ref, 'genome.fa.fai': fai, 'genome.dict': ref_dict, 'input.bam': bam, 'input.bam.bai': bai} work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) # Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file: # https://software.broadinstitute.org/gatk/documentation/article?id=2803 command = ['-T', 'HaplotypeCaller', '-nct', str(job.cores), '-R', 'genome.fa', '-I', 'input.bam', '-o', 'output.g.vcf', '-stand_call_conf', str(call_threshold), '-stand_emit_conf', str(emit_threshold), '-variant_index_type', 'LINEAR', '-variant_index_parameter', '128000', '--genotyping_mode', 'Discovery', '--emitRefConfidence', 'GVCF'] if unsafe_mode: command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command if annotations: for annotation in annotations: command.extend(['-A', annotation]) # Uses docker_call mock mode to replace output with hc_output file outputs = {'output.g.vcf': hc_output} docker_call(job=job, work_dir=work_dir, env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)}, parameters=command, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2', inputs=inputs.keys(), outputs=outputs, mock=True if outputs['output.g.vcf'] else False) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf'))
[ "def", "gatk_haplotype_caller", "(", "job", ",", "bam", ",", "bai", ",", "ref", ",", "fai", ",", "ref_dict", ",", "annotations", "=", "None", ",", "emit_threshold", "=", "10.0", ",", "call_threshold", "=", "30.0", ",", "unsafe_mode", "=", "False", ",", "hc_output", "=", "None", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Running GATK HaplotypeCaller'", ")", "inputs", "=", "{", "'genome.fa'", ":", "ref", ",", "'genome.fa.fai'", ":", "fai", ",", "'genome.dict'", ":", "ref_dict", ",", "'input.bam'", ":", "bam", ",", "'input.bam.bai'", ":", "bai", "}", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "for", "name", ",", "file_store_id", "in", "inputs", ".", "iteritems", "(", ")", ":", "job", ".", "fileStore", ".", "readGlobalFile", "(", "file_store_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", ")", "# Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file:", "# https://software.broadinstitute.org/gatk/documentation/article?id=2803", "command", "=", "[", "'-T'", ",", "'HaplotypeCaller'", ",", "'-nct'", ",", "str", "(", "job", ".", "cores", ")", ",", "'-R'", ",", "'genome.fa'", ",", "'-I'", ",", "'input.bam'", ",", "'-o'", ",", "'output.g.vcf'", ",", "'-stand_call_conf'", ",", "str", "(", "call_threshold", ")", ",", "'-stand_emit_conf'", ",", "str", "(", "emit_threshold", ")", ",", "'-variant_index_type'", ",", "'LINEAR'", ",", "'-variant_index_parameter'", ",", "'128000'", ",", "'--genotyping_mode'", ",", "'Discovery'", ",", "'--emitRefConfidence'", ",", "'GVCF'", "]", "if", "unsafe_mode", ":", "command", "=", "[", "'-U'", ",", "'ALLOW_SEQ_DICT_INCOMPATIBILITY'", "]", "+", "command", "if", "annotations", ":", "for", "annotation", "in", "annotations", ":", "command", ".", "extend", "(", "[", "'-A'", ",", "annotation", "]", ")", "# Uses docker_call mock mode to replace output with hc_output file", "outputs", "=", "{", "'output.g.vcf'", ":", "hc_output", "}", "docker_call", "(", "job", "=", "job", ",", "work_dir", "=", "work_dir", ",", "env", "=", "{", "'JAVA_OPTS'", ":", "'-Djava.io.tmpdir=/data/ -Xmx{}'", ".", "format", "(", "job", ".", "memory", ")", "}", ",", "parameters", "=", "command", ",", "tool", "=", "'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'", ",", "inputs", "=", "inputs", ".", "keys", "(", ")", ",", "outputs", "=", "outputs", ",", "mock", "=", "True", "if", "outputs", "[", "'output.g.vcf'", "]", "else", "False", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'output.g.vcf'", ")", ")" ]
Uses GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file. :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param str bai: FileStoreID for BAM index file :param str ref: FileStoreID for reference genome fasta file :param str ref_dict: FileStoreID for reference sequence dictionary file :param str fai: FileStoreID for reference fasta index file :param list[str] annotations: List of GATK variant annotations, default is None :param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0 :param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0 :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :param str hc_output: URL or local path to pre-cooked VCF file, default is None :return: FileStoreID for GVCF file :rtype: str
[ "Uses", "GATK", "HaplotypeCaller", "to", "identify", "SNPs", "and", "INDELs", ".", "Outputs", "variants", "in", "a", "Genomic", "VCF", "file", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L683-L749
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/germline.py
main
def main(): """ GATK germline pipeline with variant filtering and annotation. """ # Define Parser object and add to jobTree parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) # Generate subparsers subparsers = parser.add_subparsers(dest='command') subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the GATK germline pipeline') parser_run.add_argument('--config', required=True, type=str, help='Path to the (filled in) config file, generated with ' '"generate-config".') parser_run.add_argument('--manifest', type=str, help='Path to the (filled in) manifest file, generated with ' '"generate-manifest".\nDefault value: "%(default)s".') parser_run.add_argument('--sample', default=None, nargs=2, type=str, help='Input sample identifier and BAM file URL or local path') parser_run.add_argument('--output-dir', default=None, help='Path/URL to output directory') parser_run.add_argument('-s', '--suffix', default=None, help='Additional suffix to add to the names of the output files') parser_run.add_argument('--preprocess-only', action='store_true', help='Only runs preprocessing steps') Job.Runner.addToilOptions(parser_run) options = parser.parse_args() cwd = os.getcwd() if options.command == 'generate-config' or options.command == 'generate': generate_file(os.path.join(cwd, 'config-toil-germline.yaml'), generate_config) if options.command == 'generate-manifest' or options.command == 'generate': generate_file(os.path.join(cwd, 'manifest-toil-germline.tsv'), generate_manifest) elif options.command == 'run': # Program checks for program in ['curl', 'docker']: require(next(which(program)), program + ' must be installed on every node.'.format(program)) require(os.path.exists(options.config), '{} not found. 
Please run "generate-config"'.format(options.config)) # Read sample manifest samples = [] if options.manifest: samples.extend(parse_manifest(options.manifest)) # Add BAM sample from command line if options.sample: uuid, url = options.sample # samples tuple: (uuid, url, paired_url, rg_line) # BAM samples should not have as paired URL or read group line samples.append(GermlineSample(uuid, url, None, None)) require(len(samples) > 0, 'No samples were detected in the manifest or on the command line') # Parse inputs inputs = {x.replace('-', '_'): y for x, y in yaml.load(open(options.config).read()).iteritems()} required_fields = {'genome_fasta', 'output_dir', 'run_bwa', 'sorted', 'snp_filter_annotations', 'indel_filter_annotations', 'preprocess', 'preprocess_only', 'run_vqsr', 'joint_genotype', 'run_oncotator', 'cores', 'file_size', 'xmx', 'suffix'} input_fields = set(inputs.keys()) require(input_fields > required_fields, 'Missing config parameters:\n{}'.format(', '.join(required_fields - input_fields))) if inputs['output_dir'] is None: inputs['output_dir'] = options.output_dir require(inputs['output_dir'] is not None, 'Missing output directory PATH/URL') if inputs['suffix'] is None: inputs['suffix'] = options.suffix if options.suffix else '' if inputs['preprocess_only'] is None: inputs['preprocess_only'] = options.preprocess_only if inputs['run_vqsr']: # Check that essential VQSR parameters are present vqsr_fields = {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} require(input_fields > vqsr_fields, 'Missing parameters for VQSR:\n{}'.format(', '.join(vqsr_fields - input_fields))) # Check that hard filtering parameters are present. If only running preprocessing steps, then we do # not need filtering information. elif not inputs['preprocess_only']: hard_filter_fields = {'snp_filter_name', 'snp_filter_expression', 'indel_filter_name', 'indel_filter_expression'} require(input_fields > hard_filter_fields, 'Missing parameters for hard filtering:\n{}'.format(', '.join(hard_filter_fields - input_fields))) # Check for falsey hard filtering parameters for hard_filter_field in hard_filter_fields: require(inputs[hard_filter_field], 'Missing %s value for hard filtering, ' 'got %s.' % (hard_filter_field, inputs[hard_filter_field])) # Set resource parameters inputs['xmx'] = human2bytes(inputs['xmx']) inputs['file_size'] = human2bytes(inputs['file_size']) inputs['cores'] = int(inputs['cores']) inputs['annotations'] = set(inputs['snp_filter_annotations'] + inputs['indel_filter_annotations']) # HaplotypeCaller test data for testing inputs['hc_output'] = inputs.get('hc_output', None) # It is a toil-scripts convention to store input parameters in a Namespace object config = argparse.Namespace(**inputs) root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config) Job.Runner.startToil(root, options)
python
def main(): """ GATK germline pipeline with variant filtering and annotation. """ # Define Parser object and add to jobTree parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) # Generate subparsers subparsers = parser.add_subparsers(dest='command') subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the GATK germline pipeline') parser_run.add_argument('--config', required=True, type=str, help='Path to the (filled in) config file, generated with ' '"generate-config".') parser_run.add_argument('--manifest', type=str, help='Path to the (filled in) manifest file, generated with ' '"generate-manifest".\nDefault value: "%(default)s".') parser_run.add_argument('--sample', default=None, nargs=2, type=str, help='Input sample identifier and BAM file URL or local path') parser_run.add_argument('--output-dir', default=None, help='Path/URL to output directory') parser_run.add_argument('-s', '--suffix', default=None, help='Additional suffix to add to the names of the output files') parser_run.add_argument('--preprocess-only', action='store_true', help='Only runs preprocessing steps') Job.Runner.addToilOptions(parser_run) options = parser.parse_args() cwd = os.getcwd() if options.command == 'generate-config' or options.command == 'generate': generate_file(os.path.join(cwd, 'config-toil-germline.yaml'), generate_config) if options.command == 'generate-manifest' or options.command == 'generate': generate_file(os.path.join(cwd, 'manifest-toil-germline.tsv'), generate_manifest) elif options.command == 'run': # Program checks for program in ['curl', 'docker']: require(next(which(program)), program + ' must be installed on every node.'.format(program)) require(os.path.exists(options.config), '{} not found. 
Please run "generate-config"'.format(options.config)) # Read sample manifest samples = [] if options.manifest: samples.extend(parse_manifest(options.manifest)) # Add BAM sample from command line if options.sample: uuid, url = options.sample # samples tuple: (uuid, url, paired_url, rg_line) # BAM samples should not have as paired URL or read group line samples.append(GermlineSample(uuid, url, None, None)) require(len(samples) > 0, 'No samples were detected in the manifest or on the command line') # Parse inputs inputs = {x.replace('-', '_'): y for x, y in yaml.load(open(options.config).read()).iteritems()} required_fields = {'genome_fasta', 'output_dir', 'run_bwa', 'sorted', 'snp_filter_annotations', 'indel_filter_annotations', 'preprocess', 'preprocess_only', 'run_vqsr', 'joint_genotype', 'run_oncotator', 'cores', 'file_size', 'xmx', 'suffix'} input_fields = set(inputs.keys()) require(input_fields > required_fields, 'Missing config parameters:\n{}'.format(', '.join(required_fields - input_fields))) if inputs['output_dir'] is None: inputs['output_dir'] = options.output_dir require(inputs['output_dir'] is not None, 'Missing output directory PATH/URL') if inputs['suffix'] is None: inputs['suffix'] = options.suffix if options.suffix else '' if inputs['preprocess_only'] is None: inputs['preprocess_only'] = options.preprocess_only if inputs['run_vqsr']: # Check that essential VQSR parameters are present vqsr_fields = {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} require(input_fields > vqsr_fields, 'Missing parameters for VQSR:\n{}'.format(', '.join(vqsr_fields - input_fields))) # Check that hard filtering parameters are present. If only running preprocessing steps, then we do # not need filtering information. elif not inputs['preprocess_only']: hard_filter_fields = {'snp_filter_name', 'snp_filter_expression', 'indel_filter_name', 'indel_filter_expression'} require(input_fields > hard_filter_fields, 'Missing parameters for hard filtering:\n{}'.format(', '.join(hard_filter_fields - input_fields))) # Check for falsey hard filtering parameters for hard_filter_field in hard_filter_fields: require(inputs[hard_filter_field], 'Missing %s value for hard filtering, ' 'got %s.' % (hard_filter_field, inputs[hard_filter_field])) # Set resource parameters inputs['xmx'] = human2bytes(inputs['xmx']) inputs['file_size'] = human2bytes(inputs['file_size']) inputs['cores'] = int(inputs['cores']) inputs['annotations'] = set(inputs['snp_filter_annotations'] + inputs['indel_filter_annotations']) # HaplotypeCaller test data for testing inputs['hc_output'] = inputs.get('hc_output', None) # It is a toil-scripts convention to store input parameters in a Namespace object config = argparse.Namespace(**inputs) root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config) Job.Runner.startToil(root, options)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to jobTree", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "__doc__", ",", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ")", "# Generate subparsers", "subparsers", "=", "parser", ".", "add_subparsers", "(", "dest", "=", "'command'", ")", "subparsers", ".", "add_parser", "(", "'generate-config'", ",", "help", "=", "'Generates an editable config in the current working directory.'", ")", "subparsers", ".", "add_parser", "(", "'generate-manifest'", ",", "help", "=", "'Generates an editable manifest in the current working directory.'", ")", "subparsers", ".", "add_parser", "(", "'generate'", ",", "help", "=", "'Generates a config and manifest in the current working directory.'", ")", "# Run subparser", "parser_run", "=", "subparsers", ".", "add_parser", "(", "'run'", ",", "help", "=", "'Runs the GATK germline pipeline'", ")", "parser_run", ".", "add_argument", "(", "'--config'", ",", "required", "=", "True", ",", "type", "=", "str", ",", "help", "=", "'Path to the (filled in) config file, generated with '", "'\"generate-config\".'", ")", "parser_run", ".", "add_argument", "(", "'--manifest'", ",", "type", "=", "str", ",", "help", "=", "'Path to the (filled in) manifest file, generated with '", "'\"generate-manifest\".\\nDefault value: \"%(default)s\".'", ")", "parser_run", ".", "add_argument", "(", "'--sample'", ",", "default", "=", "None", ",", "nargs", "=", "2", ",", "type", "=", "str", ",", "help", "=", "'Input sample identifier and BAM file URL or local path'", ")", "parser_run", ".", "add_argument", "(", "'--output-dir'", ",", "default", "=", "None", ",", "help", "=", "'Path/URL to output directory'", ")", "parser_run", ".", "add_argument", "(", "'-s'", ",", "'--suffix'", ",", "default", "=", "None", ",", "help", "=", "'Additional suffix to add to the names of the output files'", ")", "parser_run", ".", "add_argument", "(", "'--preprocess-only'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Only runs preprocessing steps'", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser_run", ")", "options", "=", "parser", ".", "parse_args", "(", ")", "cwd", "=", "os", ".", "getcwd", "(", ")", "if", "options", ".", "command", "==", "'generate-config'", "or", "options", ".", "command", "==", "'generate'", ":", "generate_file", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "'config-toil-germline.yaml'", ")", ",", "generate_config", ")", "if", "options", ".", "command", "==", "'generate-manifest'", "or", "options", ".", "command", "==", "'generate'", ":", "generate_file", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "'manifest-toil-germline.tsv'", ")", ",", "generate_manifest", ")", "elif", "options", ".", "command", "==", "'run'", ":", "# Program checks", "for", "program", "in", "[", "'curl'", ",", "'docker'", "]", ":", "require", "(", "next", "(", "which", "(", "program", ")", ")", ",", "program", "+", "' must be installed on every node.'", ".", "format", "(", "program", ")", ")", "require", "(", "os", ".", "path", ".", "exists", "(", "options", ".", "config", ")", ",", "'{} not found. 
Please run \"generate-config\"'", ".", "format", "(", "options", ".", "config", ")", ")", "# Read sample manifest", "samples", "=", "[", "]", "if", "options", ".", "manifest", ":", "samples", ".", "extend", "(", "parse_manifest", "(", "options", ".", "manifest", ")", ")", "# Add BAM sample from command line", "if", "options", ".", "sample", ":", "uuid", ",", "url", "=", "options", ".", "sample", "# samples tuple: (uuid, url, paired_url, rg_line)", "# BAM samples should not have as paired URL or read group line", "samples", ".", "append", "(", "GermlineSample", "(", "uuid", ",", "url", ",", "None", ",", "None", ")", ")", "require", "(", "len", "(", "samples", ")", ">", "0", ",", "'No samples were detected in the manifest or on the command line'", ")", "# Parse inputs", "inputs", "=", "{", "x", ".", "replace", "(", "'-'", ",", "'_'", ")", ":", "y", "for", "x", ",", "y", "in", "yaml", ".", "load", "(", "open", "(", "options", ".", "config", ")", ".", "read", "(", ")", ")", ".", "iteritems", "(", ")", "}", "required_fields", "=", "{", "'genome_fasta'", ",", "'output_dir'", ",", "'run_bwa'", ",", "'sorted'", ",", "'snp_filter_annotations'", ",", "'indel_filter_annotations'", ",", "'preprocess'", ",", "'preprocess_only'", ",", "'run_vqsr'", ",", "'joint_genotype'", ",", "'run_oncotator'", ",", "'cores'", ",", "'file_size'", ",", "'xmx'", ",", "'suffix'", "}", "input_fields", "=", "set", "(", "inputs", ".", "keys", "(", ")", ")", "require", "(", "input_fields", ">", "required_fields", ",", "'Missing config parameters:\\n{}'", ".", "format", "(", "', '", ".", "join", "(", "required_fields", "-", "input_fields", ")", ")", ")", "if", "inputs", "[", "'output_dir'", "]", "is", "None", ":", "inputs", "[", "'output_dir'", "]", "=", "options", ".", "output_dir", "require", "(", "inputs", "[", "'output_dir'", "]", "is", "not", "None", ",", "'Missing output directory PATH/URL'", ")", "if", "inputs", "[", "'suffix'", "]", "is", "None", ":", "inputs", "[", "'suffix'", "]", "=", "options", ".", "suffix", "if", "options", ".", "suffix", "else", "''", "if", "inputs", "[", "'preprocess_only'", "]", "is", "None", ":", "inputs", "[", "'preprocess_only'", "]", "=", "options", ".", "preprocess_only", "if", "inputs", "[", "'run_vqsr'", "]", ":", "# Check that essential VQSR parameters are present", "vqsr_fields", "=", "{", "'g1k_snp'", ",", "'mills'", ",", "'dbsnp'", ",", "'hapmap'", ",", "'omni'", "}", "require", "(", "input_fields", ">", "vqsr_fields", ",", "'Missing parameters for VQSR:\\n{}'", ".", "format", "(", "', '", ".", "join", "(", "vqsr_fields", "-", "input_fields", ")", ")", ")", "# Check that hard filtering parameters are present. 
If only running preprocessing steps, then we do", "# not need filtering information.", "elif", "not", "inputs", "[", "'preprocess_only'", "]", ":", "hard_filter_fields", "=", "{", "'snp_filter_name'", ",", "'snp_filter_expression'", ",", "'indel_filter_name'", ",", "'indel_filter_expression'", "}", "require", "(", "input_fields", ">", "hard_filter_fields", ",", "'Missing parameters for hard filtering:\\n{}'", ".", "format", "(", "', '", ".", "join", "(", "hard_filter_fields", "-", "input_fields", ")", ")", ")", "# Check for falsey hard filtering parameters", "for", "hard_filter_field", "in", "hard_filter_fields", ":", "require", "(", "inputs", "[", "hard_filter_field", "]", ",", "'Missing %s value for hard filtering, '", "'got %s.'", "%", "(", "hard_filter_field", ",", "inputs", "[", "hard_filter_field", "]", ")", ")", "# Set resource parameters", "inputs", "[", "'xmx'", "]", "=", "human2bytes", "(", "inputs", "[", "'xmx'", "]", ")", "inputs", "[", "'file_size'", "]", "=", "human2bytes", "(", "inputs", "[", "'file_size'", "]", ")", "inputs", "[", "'cores'", "]", "=", "int", "(", "inputs", "[", "'cores'", "]", ")", "inputs", "[", "'annotations'", "]", "=", "set", "(", "inputs", "[", "'snp_filter_annotations'", "]", "+", "inputs", "[", "'indel_filter_annotations'", "]", ")", "# HaplotypeCaller test data for testing", "inputs", "[", "'hc_output'", "]", "=", "inputs", ".", "get", "(", "'hc_output'", ",", "None", ")", "# It is a toil-scripts convention to store input parameters in a Namespace object", "config", "=", "argparse", ".", "Namespace", "(", "*", "*", "inputs", ")", "root", "=", "Job", ".", "wrapJobFn", "(", "run_gatk_germline_pipeline", ",", "samples", ",", "config", ")", "Job", ".", "Runner", ".", "startToil", "(", "root", ",", "options", ")" ]
GATK germline pipeline with variant filtering and annotation.
[ "GATK", "germline", "pipeline", "with", "variant", "filtering", "and", "annotation", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L752-L894
BD2KGenomics/toil-scripts
src/toil_scripts/adam_gatk_pipeline/align_and_call.py
sample_loop
def sample_loop(job, uuid_list, inputs): """ Loops over the sample_ids (uuids) in the manifest, creating child jobs to process each """ for uuid_rg in uuid_list: uuid_items = uuid_rg.split(',') uuid = uuid_items[0] rg_line = None if len(uuid_items) > 1: rg_line = uuid_items[1] job.addChildJobFn(static_dag, uuid, rg_line, inputs)
python
def sample_loop(job, uuid_list, inputs): """ Loops over the sample_ids (uuids) in the manifest, creating child jobs to process each """ for uuid_rg in uuid_list: uuid_items = uuid_rg.split(',') uuid = uuid_items[0] rg_line = None if len(uuid_items) > 1: rg_line = uuid_items[1] job.addChildJobFn(static_dag, uuid, rg_line, inputs)
[ "def", "sample_loop", "(", "job", ",", "uuid_list", ",", "inputs", ")", ":", "for", "uuid_rg", "in", "uuid_list", ":", "uuid_items", "=", "uuid_rg", ".", "split", "(", "','", ")", "uuid", "=", "uuid_items", "[", "0", "]", "rg_line", "=", "None", "if", "len", "(", "uuid_items", ")", ">", "1", ":", "rg_line", "=", "uuid_items", "[", "1", "]", "job", ".", "addChildJobFn", "(", "static_dag", ",", "uuid", ",", "rg_line", ",", "inputs", ")" ]
Loops over the sample_ids (uuids) in the manifest, creating child jobs to process each
[ "Loops", "over", "the", "sample_ids", "(", "uuids", ")", "in", "the", "manifest", "creating", "child", "jobs", "to", "process", "each" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_gatk_pipeline/align_and_call.py#L135-L148
BD2KGenomics/toil-scripts
src/toil_scripts/adam_gatk_pipeline/align_and_call.py
static_dag
def static_dag(job, uuid, rg_line, inputs): """ Prefer this here as it allows us to pull the job functions from other jobs without rewrapping the job functions back together. bwa_inputs: Input arguments to be passed to BWA. adam_inputs: Input arguments to be passed to ADAM. gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing. gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing. gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing. """ # get work directory work_dir = job.fileStore.getLocalTempDir() inputs.cpu_count = cpu_count() inputs.maxCores = sys.maxint args = {'uuid': uuid, 's3_bucket': inputs.s3_bucket, 'sequence_dir': inputs.sequence_dir, 'dir_suffix': inputs.dir_suffix} # get head BWA alignment job function and encapsulate it inputs.rg_line = rg_line inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args) bwa = job.wrapJobFn(download_reference_files, inputs, [[uuid, ['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args), 's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate() # get head ADAM preprocessing job function and encapsulate it adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag, inputs, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args), suffix='.adam').encapsulate() # Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps. gatk_preprocessing_inputs = copy.deepcopy(inputs) gatk_preprocessing_inputs.suffix = '.gatk' gatk_preprocessing_inputs.preprocess = True gatk_preprocessing_inputs.preprocess_only = True gatk_preprocessing_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK preprocessing job function and encapsulate it gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), None, # Does not require second URL or RG_Line None), gatk_preprocessing_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed ADAM BAM file. adam_call_inputs = inputs adam_call_inputs.suffix = '.adam' adam_call_inputs.sorted = True adam_call_inputs.preprocess = False adam_call_inputs.run_vqsr = False adam_call_inputs.joint_genotype = False adam_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args), None, None), adam_call_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed GATK BAM file. 
gatk_call_inputs = copy.deepcopy(inputs) gatk_call_inputs.sorted = True gatk_call_inputs.preprocess = False gatk_call_inputs.run_vqsr = False gatk_call_inputs.joint_genotype = False gatk_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args), None, None), gatk_call_inputs).encapsulate() # wire up dag if not inputs.skip_alignment: job.addChild(bwa) if (inputs.pipeline_to_run == "adam" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_adam_call) else: if inputs.skip_alignment: job.addChild(adam_preprocess) else: bwa.addChild(adam_preprocess) adam_preprocess.addChild(gatk_adam_call) if (inputs.pipeline_to_run == "gatk" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_gatk_call) else: if inputs.skip_alignment: job.addChild(gatk_preprocess) else: bwa.addChild(gatk_preprocess) gatk_preprocess.addChild(gatk_gatk_call)
python
def static_dag(job, uuid, rg_line, inputs): """ Prefer this here as it allows us to pull the job functions from other jobs without rewrapping the job functions back together. bwa_inputs: Input arguments to be passed to BWA. adam_inputs: Input arguments to be passed to ADAM. gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing. gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing. gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing. """ # get work directory work_dir = job.fileStore.getLocalTempDir() inputs.cpu_count = cpu_count() inputs.maxCores = sys.maxint args = {'uuid': uuid, 's3_bucket': inputs.s3_bucket, 'sequence_dir': inputs.sequence_dir, 'dir_suffix': inputs.dir_suffix} # get head BWA alignment job function and encapsulate it inputs.rg_line = rg_line inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args) bwa = job.wrapJobFn(download_reference_files, inputs, [[uuid, ['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args), 's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate() # get head ADAM preprocessing job function and encapsulate it adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag, inputs, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args), suffix='.adam').encapsulate() # Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps. gatk_preprocessing_inputs = copy.deepcopy(inputs) gatk_preprocessing_inputs.suffix = '.gatk' gatk_preprocessing_inputs.preprocess = True gatk_preprocessing_inputs.preprocess_only = True gatk_preprocessing_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK preprocessing job function and encapsulate it gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args), None, # Does not require second URL or RG_Line None), gatk_preprocessing_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed ADAM BAM file. adam_call_inputs = inputs adam_call_inputs.suffix = '.adam' adam_call_inputs.sorted = True adam_call_inputs.preprocess = False adam_call_inputs.run_vqsr = False adam_call_inputs.joint_genotype = False adam_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args), None, None), adam_call_inputs).encapsulate() # Configure options for Toil Germline pipeline for preprocessed GATK BAM file. 
gatk_call_inputs = copy.deepcopy(inputs) gatk_call_inputs.sorted = True gatk_call_inputs.preprocess = False gatk_call_inputs.run_vqsr = False gatk_call_inputs.joint_genotype = False gatk_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args) # get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline, GermlineSample(uuid, 'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args), None, None), gatk_call_inputs).encapsulate() # wire up dag if not inputs.skip_alignment: job.addChild(bwa) if (inputs.pipeline_to_run == "adam" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_adam_call) else: if inputs.skip_alignment: job.addChild(adam_preprocess) else: bwa.addChild(adam_preprocess) adam_preprocess.addChild(gatk_adam_call) if (inputs.pipeline_to_run == "gatk" or inputs.pipeline_to_run == "both"): if inputs.skip_preprocessing: job.addChild(gatk_gatk_call) else: if inputs.skip_alignment: job.addChild(gatk_preprocess) else: bwa.addChild(gatk_preprocess) gatk_preprocess.addChild(gatk_gatk_call)
[ "def", "static_dag", "(", "job", ",", "uuid", ",", "rg_line", ",", "inputs", ")", ":", "# get work directory", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "inputs", ".", "cpu_count", "=", "cpu_count", "(", ")", "inputs", ".", "maxCores", "=", "sys", ".", "maxint", "args", "=", "{", "'uuid'", ":", "uuid", ",", "'s3_bucket'", ":", "inputs", ".", "s3_bucket", ",", "'sequence_dir'", ":", "inputs", ".", "sequence_dir", ",", "'dir_suffix'", ":", "inputs", ".", "dir_suffix", "}", "# get head BWA alignment job function and encapsulate it", "inputs", ".", "rg_line", "=", "rg_line", "inputs", ".", "output_dir", "=", "'s3://{s3_bucket}/alignment{dir_suffix}'", ".", "format", "(", "*", "*", "args", ")", "bwa", "=", "job", ".", "wrapJobFn", "(", "download_reference_files", ",", "inputs", ",", "[", "[", "uuid", ",", "[", "'s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'", ".", "format", "(", "*", "*", "args", ")", ",", "'s3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'", ".", "format", "(", "*", "*", "args", ")", "]", "]", "]", ")", ".", "encapsulate", "(", ")", "# get head ADAM preprocessing job function and encapsulate it", "adam_preprocess", "=", "job", ".", "wrapJobFn", "(", "static_adam_preprocessing_dag", ",", "inputs", ",", "'s3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'", ".", "format", "(", "*", "*", "args", ")", ",", "'s3://{s3_bucket}/analysis{dir_suffix}/{uuid}'", ".", "format", "(", "*", "*", "args", ")", ",", "suffix", "=", "'.adam'", ")", ".", "encapsulate", "(", ")", "# Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps.", "gatk_preprocessing_inputs", "=", "copy", ".", "deepcopy", "(", "inputs", ")", "gatk_preprocessing_inputs", ".", "suffix", "=", "'.gatk'", "gatk_preprocessing_inputs", ".", "preprocess", "=", "True", "gatk_preprocessing_inputs", ".", "preprocess_only", "=", "True", "gatk_preprocessing_inputs", ".", "output_dir", "=", "'s3://{s3_bucket}/analysis{dir_suffix}'", ".", "format", "(", "*", "*", "args", ")", "# get head GATK preprocessing job function and encapsulate it", "gatk_preprocess", "=", "job", ".", "wrapJobFn", "(", "run_gatk_germline_pipeline", ",", "GermlineSample", "(", "uuid", ",", "'s3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'", ".", "format", "(", "*", "*", "args", ")", ",", "None", ",", "# Does not require second URL or RG_Line", "None", ")", ",", "gatk_preprocessing_inputs", ")", ".", "encapsulate", "(", ")", "# Configure options for Toil Germline pipeline for preprocessed ADAM BAM file.", "adam_call_inputs", "=", "inputs", "adam_call_inputs", ".", "suffix", "=", "'.adam'", "adam_call_inputs", ".", "sorted", "=", "True", "adam_call_inputs", ".", "preprocess", "=", "False", "adam_call_inputs", ".", "run_vqsr", "=", "False", "adam_call_inputs", ".", "joint_genotype", "=", "False", "adam_call_inputs", ".", "output_dir", "=", "'s3://{s3_bucket}/analysis{dir_suffix}'", ".", "format", "(", "*", "*", "args", ")", "# get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it", "gatk_adam_call", "=", "job", ".", "wrapJobFn", "(", "run_gatk_germline_pipeline", ",", "GermlineSample", "(", "uuid", ",", "'s3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'", ".", "format", "(", "*", "*", "args", ")", ",", "None", ",", "None", ")", ",", "adam_call_inputs", ")", ".", "encapsulate", "(", ")", "# Configure options for Toil Germline pipeline for preprocessed GATK BAM file.", "gatk_call_inputs", "=", "copy", ".", "deepcopy", "(", 
"inputs", ")", "gatk_call_inputs", ".", "sorted", "=", "True", "gatk_call_inputs", ".", "preprocess", "=", "False", "gatk_call_inputs", ".", "run_vqsr", "=", "False", "gatk_call_inputs", ".", "joint_genotype", "=", "False", "gatk_call_inputs", ".", "output_dir", "=", "'s3://{s3_bucket}/analysis{dir_suffix}'", ".", "format", "(", "*", "*", "args", ")", "# get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it", "gatk_gatk_call", "=", "job", ".", "wrapJobFn", "(", "run_gatk_germline_pipeline", ",", "GermlineSample", "(", "uuid", ",", "'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'", ".", "format", "(", "*", "*", "args", ")", ",", "None", ",", "None", ")", ",", "gatk_call_inputs", ")", ".", "encapsulate", "(", ")", "# wire up dag", "if", "not", "inputs", ".", "skip_alignment", ":", "job", ".", "addChild", "(", "bwa", ")", "if", "(", "inputs", ".", "pipeline_to_run", "==", "\"adam\"", "or", "inputs", ".", "pipeline_to_run", "==", "\"both\"", ")", ":", "if", "inputs", ".", "skip_preprocessing", ":", "job", ".", "addChild", "(", "gatk_adam_call", ")", "else", ":", "if", "inputs", ".", "skip_alignment", ":", "job", ".", "addChild", "(", "adam_preprocess", ")", "else", ":", "bwa", ".", "addChild", "(", "adam_preprocess", ")", "adam_preprocess", ".", "addChild", "(", "gatk_adam_call", ")", "if", "(", "inputs", ".", "pipeline_to_run", "==", "\"gatk\"", "or", "inputs", ".", "pipeline_to_run", "==", "\"both\"", ")", ":", "if", "inputs", ".", "skip_preprocessing", ":", "job", ".", "addChild", "(", "gatk_gatk_call", ")", "else", ":", "if", "inputs", ".", "skip_alignment", ":", "job", ".", "addChild", "(", "gatk_preprocess", ")", "else", ":", "bwa", ".", "addChild", "(", "gatk_preprocess", ")", "gatk_preprocess", ".", "addChild", "(", "gatk_gatk_call", ")" ]
Prefer this here as it allows us to pull the job functions from other jobs without rewrapping the job functions back together. bwa_inputs: Input arguments to be passed to BWA. adam_inputs: Input arguments to be passed to ADAM. gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing. gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing. gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing.
[ "Prefer", "this", "here", "as", "it", "allows", "us", "to", "pull", "the", "job", "functions", "from", "other", "jobs", "without", "rewrapping", "the", "job", "functions", "back", "together", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_gatk_pipeline/align_and_call.py#L151-L266
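For illustration, a minimal sketch of the child-job wiring pattern that static_dag relies on. FakeJob and wire() are stand-ins written for this note (not part of toil-scripts or Toil) so the branching logic can be exercised without a Toil installation; only the ADAM branch is reproduced here.

# Minimal stand-in for toil.job.Job so the wiring pattern runs without Toil.
# FakeJob and wire() are illustrative only and not part of toil-scripts.
class FakeJob(object):
    def __init__(self, name):
        self.name = name
        self.children = []

    def addChild(self, job):
        self.children.append(job)
        return job

def wire(root, skip_alignment, skip_preprocessing, pipeline_to_run):
    bwa = FakeJob('bwa')
    adam_preprocess = FakeJob('adam_preprocess')
    gatk_adam_call = FakeJob('gatk_adam_call')
    if not skip_alignment:
        root.addChild(bwa)
    if pipeline_to_run in ('adam', 'both'):
        if skip_preprocessing:
            root.addChild(gatk_adam_call)
        else:
            parent = root if skip_alignment else bwa
            parent.addChild(adam_preprocess)
            adam_preprocess.addChild(gatk_adam_call)
    return root

root = wire(FakeJob('root'), skip_alignment=False,
            skip_preprocessing=False, pipeline_to_run='adam')
print([child.name for child in root.children])  # ['bwa']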
BD2KGenomics/toil-scripts
src/toil_scripts/adam_gatk_pipeline/align_and_call.py
main
def main(): """ This is a Toil pipeline used to perform alignment of fastqs. """ # Define Parser object and add to Toil if mock_mode(): usage_msg = 'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline ' \ 'will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0' else: usage_msg = None parser = argparse.ArgumentParser(usage=usage_msg) subparsers = parser.add_subparsers(dest='command') subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the ADAM/GATK pipeline') default_config = 'adam-gatk-mock.config' if mock_mode() else 'adam-gatk.config' default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode() else 'adam-gatk-manifest.csv' parser_run.add_argument('--config', default=default_config, type=str, help='Path to the (filled in) config file, generated with "generate-config".') parser_run.add_argument('--manifest', default=default_manifest, type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". ' '\nDefault value: "%(default)s".') Job.Runner.addToilOptions(parser_run) args = parser.parse_args() cwd = os.getcwd() if args.command == 'generate-config' or args.command == 'generate': generate_file(os.path.join(cwd, default_config), generate_config) if args.command == 'generate-manifest' or args.command == 'generate': generate_file(os.path.join(cwd, default_manifest), generate_manifest) # Pipeline execution elif args.command == 'run': require(os.path.exists(args.config), '{} not found. Please run ' 'generate-config'.format(args.config)) if not hasattr(args, 'sample'): require(os.path.exists(args.manifest), '{} not found and no samples provided. Please ' 'run "generate-manifest"'.format(args.manifest)) # Parse config parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()} inputs = argparse.Namespace(**parsed_config) # Parse manifest file uuid_list = [] with open(args.manifest) as f_manifest: for line in f_manifest: if not line.isspace() and not line.startswith('#'): uuid_list.append(line.strip()) inputs.sort = False if not inputs.dir_suffix: inputs.dir_suffix = '' if not inputs.s3_bucket: inputs.s3_bucket = '' if inputs.master_ip and inputs.num_nodes: raise ValueError("Exactly one of master_ip (%s) and num_nodes (%d) must be provided." % (inputs.master_ip, inputs.num_nodes)) if not hasattr(inputs, 'master_ip') and inputs.num_nodes <= 1: raise ValueError('num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater ' 'than 1. %d was passed.' % inputs.num_nodes) if (inputs.pipeline_to_run != "adam" and inputs.pipeline_to_run != "gatk" and inputs.pipeline_to_run != "both"): raise ValueError("pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed." % inputs.pipeline_to_run) Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs), args)
python
def main(): """ This is a Toil pipeline used to perform alignment of fastqs. """ # Define Parser object and add to Toil if mock_mode(): usage_msg = 'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline ' \ 'will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0' else: usage_msg = None parser = argparse.ArgumentParser(usage=usage_msg) subparsers = parser.add_subparsers(dest='command') subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.') # Run subparser parser_run = subparsers.add_parser('run', help='Runs the ADAM/GATK pipeline') default_config = 'adam-gatk-mock.config' if mock_mode() else 'adam-gatk.config' default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode() else 'adam-gatk-manifest.csv' parser_run.add_argument('--config', default=default_config, type=str, help='Path to the (filled in) config file, generated with "generate-config".') parser_run.add_argument('--manifest', default=default_manifest, type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". ' '\nDefault value: "%(default)s".') Job.Runner.addToilOptions(parser_run) args = parser.parse_args() cwd = os.getcwd() if args.command == 'generate-config' or args.command == 'generate': generate_file(os.path.join(cwd, default_config), generate_config) if args.command == 'generate-manifest' or args.command == 'generate': generate_file(os.path.join(cwd, default_manifest), generate_manifest) # Pipeline execution elif args.command == 'run': require(os.path.exists(args.config), '{} not found. Please run ' 'generate-config'.format(args.config)) if not hasattr(args, 'sample'): require(os.path.exists(args.manifest), '{} not found and no samples provided. Please ' 'run "generate-manifest"'.format(args.manifest)) # Parse config parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()} inputs = argparse.Namespace(**parsed_config) # Parse manifest file uuid_list = [] with open(args.manifest) as f_manifest: for line in f_manifest: if not line.isspace() and not line.startswith('#'): uuid_list.append(line.strip()) inputs.sort = False if not inputs.dir_suffix: inputs.dir_suffix = '' if not inputs.s3_bucket: inputs.s3_bucket = '' if inputs.master_ip and inputs.num_nodes: raise ValueError("Exactly one of master_ip (%s) and num_nodes (%d) must be provided." % (inputs.master_ip, inputs.num_nodes)) if not hasattr(inputs, 'master_ip') and inputs.num_nodes <= 1: raise ValueError('num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater ' 'than 1. %d was passed.' % inputs.num_nodes) if (inputs.pipeline_to_run != "adam" and inputs.pipeline_to_run != "gatk" and inputs.pipeline_to_run != "both"): raise ValueError("pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed." % inputs.pipeline_to_run) Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs), args)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to Toil", "if", "mock_mode", "(", ")", ":", "usage_msg", "=", "'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline '", "'will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0'", "else", ":", "usage_msg", "=", "None", "parser", "=", "argparse", ".", "ArgumentParser", "(", "usage", "=", "usage_msg", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "dest", "=", "'command'", ")", "subparsers", ".", "add_parser", "(", "'generate-config'", ",", "help", "=", "'Generates an editable config in the current working directory.'", ")", "subparsers", ".", "add_parser", "(", "'generate-manifest'", ",", "help", "=", "'Generates an editable manifest in the current working directory.'", ")", "subparsers", ".", "add_parser", "(", "'generate'", ",", "help", "=", "'Generates a config and manifest in the current working directory.'", ")", "# Run subparser ", "parser_run", "=", "subparsers", ".", "add_parser", "(", "'run'", ",", "help", "=", "'Runs the ADAM/GATK pipeline'", ")", "default_config", "=", "'adam-gatk-mock.config'", "if", "mock_mode", "(", ")", "else", "'adam-gatk.config'", "default_manifest", "=", "'adam-gatk-mock-manifest.csv'", "if", "mock_mode", "(", ")", "else", "'adam-gatk-manifest.csv'", "parser_run", ".", "add_argument", "(", "'--config'", ",", "default", "=", "default_config", ",", "type", "=", "str", ",", "help", "=", "'Path to the (filled in) config file, generated with \"generate-config\".'", ")", "parser_run", ".", "add_argument", "(", "'--manifest'", ",", "default", "=", "default_manifest", ",", "type", "=", "str", ",", "help", "=", "'Path to the (filled in) manifest file, generated with \"generate-manifest\". '", "'\\nDefault value: \"%(default)s\".'", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser_run", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "cwd", "=", "os", ".", "getcwd", "(", ")", "if", "args", ".", "command", "==", "'generate-config'", "or", "args", ".", "command", "==", "'generate'", ":", "generate_file", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "default_config", ")", ",", "generate_config", ")", "if", "args", ".", "command", "==", "'generate-manifest'", "or", "args", ".", "command", "==", "'generate'", ":", "generate_file", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "default_manifest", ")", ",", "generate_manifest", ")", "# Pipeline execution", "elif", "args", ".", "command", "==", "'run'", ":", "require", "(", "os", ".", "path", ".", "exists", "(", "args", ".", "config", ")", ",", "'{} not found. Please run '", "'generate-config'", ".", "format", "(", "args", ".", "config", ")", ")", "if", "not", "hasattr", "(", "args", ",", "'sample'", ")", ":", "require", "(", "os", ".", "path", ".", "exists", "(", "args", ".", "manifest", ")", ",", "'{} not found and no samples provided. 
Please '", "'run \"generate-manifest\"'", ".", "format", "(", "args", ".", "manifest", ")", ")", "# Parse config", "parsed_config", "=", "{", "x", ".", "replace", "(", "'-'", ",", "'_'", ")", ":", "y", "for", "x", ",", "y", "in", "yaml", ".", "load", "(", "open", "(", "args", ".", "config", ")", ".", "read", "(", ")", ")", ".", "iteritems", "(", ")", "}", "inputs", "=", "argparse", ".", "Namespace", "(", "*", "*", "parsed_config", ")", "# Parse manifest file", "uuid_list", "=", "[", "]", "with", "open", "(", "args", ".", "manifest", ")", "as", "f_manifest", ":", "for", "line", "in", "f_manifest", ":", "if", "not", "line", ".", "isspace", "(", ")", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "uuid_list", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "inputs", ".", "sort", "=", "False", "if", "not", "inputs", ".", "dir_suffix", ":", "inputs", ".", "dir_suffix", "=", "''", "if", "not", "inputs", ".", "s3_bucket", ":", "inputs", ".", "s3_bucket", "=", "''", "if", "inputs", ".", "master_ip", "and", "inputs", ".", "num_nodes", ":", "raise", "ValueError", "(", "\"Exactly one of master_ip (%s) and num_nodes (%d) must be provided.\"", "%", "(", "inputs", ".", "master_ip", ",", "inputs", ".", "num_nodes", ")", ")", "if", "not", "hasattr", "(", "inputs", ",", "'master_ip'", ")", "and", "inputs", ".", "num_nodes", "<=", "1", ":", "raise", "ValueError", "(", "'num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater '", "'than 1. %d was passed.'", "%", "inputs", ".", "num_nodes", ")", "if", "(", "inputs", ".", "pipeline_to_run", "!=", "\"adam\"", "and", "inputs", ".", "pipeline_to_run", "!=", "\"gatk\"", "and", "inputs", ".", "pipeline_to_run", "!=", "\"both\"", ")", ":", "raise", "ValueError", "(", "\"pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed.\"", "%", "inputs", ".", "pipeline_to_run", ")", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "sample_loop", ",", "uuid_list", ",", "inputs", ")", ",", "args", ")" ]
This is a Toil pipeline used to perform alignment of fastqs.
[ "This", "is", "a", "Toil", "pipeline", "used", "to", "perform", "alignment", "of", "fastqs", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_gatk_pipeline/align_and_call.py#L388-L458
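A small sketch of the config handling inside main(): hyphenated YAML keys are rewritten to underscores so they can be read back as argparse.Namespace attributes. This version assumes Python 3 and yaml.safe_load (the pipeline itself targets Python 2), and the config values are invented for the example.

# Hyphenated YAML keys become underscore attribute names on a Namespace.
import argparse
import yaml

raw = """
s3-bucket: my-bucket
pipeline-to-run: both
num-nodes: 3
"""
parsed = {k.replace('-', '_'): v for k, v in yaml.safe_load(raw).items()}
inputs = argparse.Namespace(**parsed)
print(inputs.s3_bucket, inputs.pipeline_to_run, inputs.num_nodes)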
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
generate_unique_key
def generate_unique_key(master_key_path, url): """ Input1: Path to the BD2K Master Key (for S3 Encryption) Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam) Returns: 32-byte unique key generated for that URL """ with open(master_key_path, 'r') as f: master_key = f.read() assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \ 'Key: {}, Length: {}'.format(master_key, len(master_key)) new_key = hashlib.sha256(master_key + url).digest() assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key) return new_key
python
def generate_unique_key(master_key_path, url): """ Input1: Path to the BD2K Master Key (for S3 Encryption) Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam) Returns: 32-byte unique key generated for that URL """ with open(master_key_path, 'r') as f: master_key = f.read() assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \ 'Key: {}, Length: {}'.format(master_key, len(master_key)) new_key = hashlib.sha256(master_key + url).digest() assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key) return new_key
[ "def", "generate_unique_key", "(", "master_key_path", ",", "url", ")", ":", "with", "open", "(", "master_key_path", ",", "'r'", ")", "as", "f", ":", "master_key", "=", "f", ".", "read", "(", ")", "assert", "len", "(", "master_key", ")", "==", "32", ",", "'Invalid Key! Must be 32 characters. '", "'Key: {}, Length: {}'", ".", "format", "(", "master_key", ",", "len", "(", "master_key", ")", ")", "new_key", "=", "hashlib", ".", "sha256", "(", "master_key", "+", "url", ")", ".", "digest", "(", ")", "assert", "len", "(", "new_key", ")", "==", "32", ",", "'New key is invalid and is not 32 characters: {}'", ".", "format", "(", "new_key", ")", "return", "new_key" ]
Input1: Path to the BD2K Master Key (for S3 Encryption) Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam) Returns: 32-byte unique key generated for that URL
[ "Input1", ":", "Path", "to", "the", "BD2K", "Master", "Key", "(", "for", "S3", "Encryption", ")", "Input2", ":", "S3", "URL", "(", "e", ".", "g", ".", "https", ":", "//", "s3", "-", "us", "-", "west", "-", "2", ".", "amazonaws", ".", "com", "/", "cgl", "-", "driver", "-", "projects", "-", "encrypted", "/", "wcdt", "/", "exome_bams", "/", "DTB", "-", "111", "-", "N", ".", "bam", ")" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L49-L62
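A Python 3 adaptation of the per-URL key derivation above, since hashlib in Python 3 requires bytes rather than str. The master key and URL shown are placeholders, not real credentials.

# Derive a per-file key as sha256(master_key + url); values are fake.
import hashlib

master_key = 'A' * 32  # placeholder for the 32-character master key
url = 'https://s3-us-west-2.amazonaws.com/example-bucket/sample.bam'
assert len(master_key) == 32
new_key = hashlib.sha256((master_key + url).encode('utf-8')).digest()
assert len(new_key) == 32  # a SHA-256 digest is always 32 bytes
print(new_key.hex())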
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
download_encrypted_file
def download_encrypted_file(work_dir, url, key_path, name): """ Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded """ file_path = os.path.join(work_dir, name) key = generate_unique_key(key_path, url) encoded_key = base64.b64encode(key) encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()) h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256' h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key) h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5) try: subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path)
python
def download_encrypted_file(work_dir, url, key_path, name): """ Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded """ file_path = os.path.join(work_dir, name) key = generate_unique_key(key_path, url) encoded_key = base64.b64encode(key) encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()) h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256' h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key) h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5) try: subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path)
[ "def", "download_encrypted_file", "(", "work_dir", ",", "url", ",", "key_path", ",", "name", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", "key", "=", "generate_unique_key", "(", "key_path", ",", "url", ")", "encoded_key", "=", "base64", ".", "b64encode", "(", "key", ")", "encoded_key_md5", "=", "base64", ".", "b64encode", "(", "hashlib", ".", "md5", "(", "key", ")", ".", "digest", "(", ")", ")", "h1", "=", "'x-amz-server-side-encryption-customer-algorithm:AES256'", "h2", "=", "'x-amz-server-side-encryption-customer-key:{}'", ".", "format", "(", "encoded_key", ")", "h3", "=", "'x-amz-server-side-encryption-customer-key-md5:{}'", ".", "format", "(", "encoded_key_md5", ")", "try", ":", "subprocess", ".", "check_call", "(", "[", "'curl'", ",", "'-fs'", ",", "'--retry'", ",", "'5'", ",", "'-H'", ",", "h1", ",", "'-H'", ",", "h2", ",", "'-H'", ",", "h3", ",", "url", ",", "'-o'", ",", "file_path", "]", ")", "except", "OSError", ":", "raise", "RuntimeError", "(", "'Failed to find \"curl\". Install via \"apt-get install curl\"'", ")", "assert", "os", ".", "path", ".", "exists", "(", "file_path", ")" ]
Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded
[ "Downloads", "encrypted", "file", "from", "S3" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L65-L85
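A sketch of how the three SSE-C headers passed to curl in download_encrypted_file are derived from a 32-byte key, again using Python 3 bytes handling; the key value below is a placeholder.

# Build the SSE-C headers from a 32-byte key (placeholder value).
import base64
import hashlib

key = b'0' * 32
headers = [
    'x-amz-server-side-encryption-customer-algorithm:AES256',
    'x-amz-server-side-encryption-customer-key:' +
    base64.b64encode(key).decode('ascii'),
    'x-amz-server-side-encryption-customer-key-md5:' +
    base64.b64encode(hashlib.md5(key).digest()).decode('ascii'),
]
for header in headers:
    print(header)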
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
return_input_paths
def return_input_paths(job, work_dir, ids, *args): """ Returns the paths of files from the FileStore Input1: Toil job instance Input2: Working directory Input3: jobstore id dictionary Input4: names of files to be returned from the jobstore Returns: path(s) to the file(s) requested -- unpack these! """ paths = OrderedDict() for name in args: if not os.path.exists(os.path.join(work_dir, name)): file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name)) else: file_path = os.path.join(work_dir, name) paths[name] = file_path if len(args) == 1: return file_path return paths.values()
python
def return_input_paths(job, work_dir, ids, *args): """ Returns the paths of files from the FileStore Input1: Toil job instance Input2: Working directory Input3: jobstore id dictionary Input4: names of files to be returned from the jobstore Returns: path(s) to the file(s) requested -- unpack these! """ paths = OrderedDict() for name in args: if not os.path.exists(os.path.join(work_dir, name)): file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name)) else: file_path = os.path.join(work_dir, name) paths[name] = file_path if len(args) == 1: return file_path return paths.values()
[ "def", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "*", "args", ")", ":", "paths", "=", "OrderedDict", "(", ")", "for", "name", "in", "args", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", ")", ":", "file_path", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "ids", "[", "name", "]", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", ")", "else", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", "paths", "[", "name", "]", "=", "file_path", "if", "len", "(", "args", ")", "==", "1", ":", "return", "file_path", "return", "paths", ".", "values", "(", ")" ]
Returns the paths of files from the FileStore Input1: Toil job instance Input2: Working directory Input3: jobstore id dictionary Input4: names of files to be returned from the jobstore Returns: path(s) to the file(s) requested -- unpack these!
[ "Returns", "the", "paths", "of", "files", "from", "the", "FileStore" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L111-L132
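A stand-alone sketch of the calling convention used by return_input_paths: requesting one file name returns a single path, while requesting several returns a list that callers unpack. The FileStore read is stubbed out, so fake_return_input_paths is illustrative only.

# Illustrates the single-path vs list return behaviour; no Toil needed.
import os
from collections import OrderedDict

def fake_return_input_paths(work_dir, ids, *args):
    paths = OrderedDict()
    for name in args:
        file_path = os.path.join(work_dir, name)  # pretend it was fetched
        paths[name] = file_path
    if len(args) == 1:
        return file_path
    return list(paths.values())

ref = fake_return_input_paths('/tmp/work', {}, 'ref.fa')
ref, fai = fake_return_input_paths('/tmp/work', {}, 'ref.fa', 'ref.fa.fai')
print(ref, fai)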
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
move_to_output_dir
def move_to_output_dir(work_dir, output_dir, uuid=None, files=list()): """ Moves files from work_dir to output_dir Input1: Working directory Input2: Output directory Input3: UUID to be prepended onto file name Input4: list of file names to be moved from working dir to output dir """ for fname in files: if uuid is None: shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname)) else: shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
python
def move_to_output_dir(work_dir, output_dir, uuid=None, files=list()): """ Moves files from work_dir to output_dir Input1: Working directory Input2: Output directory Input3: UUID to be prepended onto file name Input4: list of file names to be moved from working dir to output dir """ for fname in files: if uuid is None: shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname)) else: shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
[ "def", "move_to_output_dir", "(", "work_dir", ",", "output_dir", ",", "uuid", "=", "None", ",", "files", "=", "list", "(", ")", ")", ":", "for", "fname", "in", "files", ":", "if", "uuid", "is", "None", ":", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "os", ".", "path", ".", "join", "(", "output_dir", ",", "fname", ")", ")", "else", ":", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'{}.{}'", ".", "format", "(", "uuid", ",", "fname", ")", ")", ")" ]
Moves files from work_dir to output_dir Input1: Working directory Input2: Output directory Input3: UUID to be prepended onto file name Input4: list of file names to be moved from working dir to output dir
[ "Moves", "files", "from", "work_dir", "to", "output_dir" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L135-L148
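A tiny sketch of the naming rule applied by move_to_output_dir: with a uuid the file is renamed '<uuid>.<name>', otherwise it keeps its original name. output_name is a made-up helper for this note, not part of the repository.

# Show the uuid-prefixing rule in isolation.
def output_name(fname, uuid=None):
    return fname if uuid is None else '{}.{}'.format(uuid, fname)

print(output_name('aligned.bam'))                   # aligned.bam
print(output_name('aligned.bam', uuid='sample-1'))  # sample-1.aligned.bam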
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
batch_start
def batch_start(job, input_args): """ Downloads shared files that are used by all samples for alignment and places them in the jobstore. """ shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai'] shared_ids = {} for fname in shared_files: url = input_args[fname] shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv() job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args)
python
def batch_start(job, input_args): """ Downloads shared files that are used by all samples for alignment and places them in the jobstore. """ shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai'] shared_ids = {} for fname in shared_files: url = input_args[fname] shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv() job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args)
[ "def", "batch_start", "(", "job", ",", "input_args", ")", ":", "shared_files", "=", "[", "'ref.fa'", ",", "'ref.fa.amb'", ",", "'ref.fa.ann'", ",", "'ref.fa.bwt'", ",", "'ref.fa.pac'", ",", "'ref.fa.sa'", ",", "'ref.fa.fai'", "]", "shared_ids", "=", "{", "}", "for", "fname", "in", "shared_files", ":", "url", "=", "input_args", "[", "fname", "]", "shared_ids", "[", "fname", "]", "=", "job", ".", "addChildJobFn", "(", "download_from_url", ",", "url", ",", "fname", ")", ".", "rv", "(", ")", "job", ".", "addFollowOnJobFn", "(", "spawn_batch_jobs", ",", "shared_ids", ",", "input_args", ")" ]
Downloads shared files that are used by all samples for alignment and places them in the jobstore.
[ "Downloads", "shared", "files", "that", "are", "used", "by", "all", "samples", "for", "alignment", "and", "places", "them", "in", "the", "jobstore", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L152-L161
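A stand-in sketch (no Toil required) of the promise pattern batch_start depends on: each child job contributes a value that is only resolved when the follow-on runs. The Promise class and resolve() call are illustrative and are not Toil's real API.

# Mimic .rv(): values are produced lazily and read later by the follow-on.
class Promise(object):
    def __init__(self, fn):
        self.fn = fn

    def resolve(self):
        return self.fn()

shared_files = ['ref.fa', 'ref.fa.fai']
shared_ids = {name: Promise(lambda n=name: 'filestore-id-for-' + n)
              for name in shared_files}

# The follow-on job ("spawn_batch_jobs") sees concrete values, not promises.
resolved = {name: promise.resolve() for name, promise in shared_ids.items()}
print(resolved['ref.fa'])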
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
spawn_batch_jobs
def spawn_batch_jobs(job, shared_ids, input_args): """ Spawns an alignment job for every sample in the input configuration file """ samples = [] config = input_args['config'] with open(config, 'r') as f_in: for line in f_in: line = line.strip().split(',') uuid = line[0] urls = line[1:] samples.append((uuid, urls)) for sample in samples: job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
python
def spawn_batch_jobs(job, shared_ids, input_args): """ Spawns an alignment job for every sample in the input configuration file """ samples = [] config = input_args['config'] with open(config, 'r') as f_in: for line in f_in: line = line.strip().split(',') uuid = line[0] urls = line[1:] samples.append((uuid, urls)) for sample in samples: job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
[ "def", "spawn_batch_jobs", "(", "job", ",", "shared_ids", ",", "input_args", ")", ":", "samples", "=", "[", "]", "config", "=", "input_args", "[", "'config'", "]", "with", "open", "(", "config", ",", "'r'", ")", "as", "f_in", ":", "for", "line", "in", "f_in", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "uuid", "=", "line", "[", "0", "]", "urls", "=", "line", "[", "1", ":", "]", "samples", ".", "append", "(", "(", "uuid", ",", "urls", ")", ")", "for", "sample", "in", "samples", ":", "job", ".", "addChildJobFn", "(", "alignment", ",", "shared_ids", ",", "input_args", ",", "sample", ",", "cores", "=", "32", ",", "memory", "=", "'20 G'", ",", "disk", "=", "'100 G'", ")" ]
Spawns an alignment job for every sample in the input configuration file
[ "Spawns", "an", "alignment", "job", "for", "every", "sample", "in", "the", "input", "configuration", "file" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L164-L177
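A sketch of the per-line parsing done by spawn_batch_jobs: each config line is 'uuid,url1,url2,...', split into a (uuid, [urls]) tuple. The sample lines below are made up.

# Parse comma-separated sample lines into (uuid, [urls]) tuples.
lines = [
    'sample-1,https://example.com/s1_1.fastq.gz,https://example.com/s1_2.fastq.gz',
    'sample-2,https://example.com/s2.fastq.gz',
]
samples = []
for line in lines:
    parts = line.strip().split(',')
    samples.append((parts[0], parts[1:]))  # (uuid, [urls])
for uuid, urls in samples:
    print(uuid, len(urls), 'fastq URL(s)')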
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
alignment
def alignment(job, ids, input_args, sample): """ Runs BWA and then Bamsort on the supplied fastqs for this sample Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample """ uuid, urls = sample # ids['bam'] = job.fileStore.getEmptyFileStoreID() work_dir = job.fileStore.getLocalTempDir() output_dir = input_args['output_dir'] key_path = input_args['ssec'] cores = multiprocessing.cpu_count() # I/O return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai') # Get fastqs associated with this sample for url in urls: download_encrypted_file(work_dir, url, key_path, os.path.basename(url)) # Parameters for BWA and Bamsort docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)] bwa_command = ["jvivian/bwa", "mem", "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid), "-T", str(0), "-t", str(cores), "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls] bamsort_command = ["jeltje/biobambam", "/usr/local/bin/bamsort", "inputformat=sam", "level=1", "inputthreads={}".format(cores), "outputthreads={}".format(cores), "calmdnm=1", "calmdnmrecompindetonly=1", "calmdnmreference=/data/ref.fa", "I=/data/{}".format(uuid + '.sam')] # Piping the output to a file handle with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out: subprocess.check_call(docker_cmd + bwa_command, stdout=f_out) with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out: subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out) # Save in JobStore # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam')) ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam')) # Copy file to S3 if input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G') # Move file in output_dir if input_args['output_dir']: move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
python
def alignment(job, ids, input_args, sample): """ Runs BWA and then Bamsort on the supplied fastqs for this sample Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample """ uuid, urls = sample # ids['bam'] = job.fileStore.getEmptyFileStoreID() work_dir = job.fileStore.getLocalTempDir() output_dir = input_args['output_dir'] key_path = input_args['ssec'] cores = multiprocessing.cpu_count() # I/O return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai') # Get fastqs associated with this sample for url in urls: download_encrypted_file(work_dir, url, key_path, os.path.basename(url)) # Parameters for BWA and Bamsort docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)] bwa_command = ["jvivian/bwa", "mem", "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid), "-T", str(0), "-t", str(cores), "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls] bamsort_command = ["jeltje/biobambam", "/usr/local/bin/bamsort", "inputformat=sam", "level=1", "inputthreads={}".format(cores), "outputthreads={}".format(cores), "calmdnm=1", "calmdnmrecompindetonly=1", "calmdnmreference=/data/ref.fa", "I=/data/{}".format(uuid + '.sam')] # Piping the output to a file handle with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out: subprocess.check_call(docker_cmd + bwa_command, stdout=f_out) with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out: subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out) # Save in JobStore # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam')) ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam')) # Copy file to S3 if input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G') # Move file in output_dir if input_args['output_dir']: move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
[ "def", "alignment", "(", "job", ",", "ids", ",", "input_args", ",", "sample", ")", ":", "uuid", ",", "urls", "=", "sample", "# ids['bam'] = job.fileStore.getEmptyFileStoreID()", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "output_dir", "=", "input_args", "[", "'output_dir'", "]", "key_path", "=", "input_args", "[", "'ssec'", "]", "cores", "=", "multiprocessing", ".", "cpu_count", "(", ")", "# I/O", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'ref.fa'", ",", "'ref.fa.amb'", ",", "'ref.fa.ann'", ",", "'ref.fa.bwt'", ",", "'ref.fa.pac'", ",", "'ref.fa.sa'", ",", "'ref.fa.fai'", ")", "# Get fastqs associated with this sample", "for", "url", "in", "urls", ":", "download_encrypted_file", "(", "work_dir", ",", "url", ",", "key_path", ",", "os", ".", "path", ".", "basename", "(", "url", ")", ")", "# Parameters for BWA and Bamsort", "docker_cmd", "=", "[", "'docker'", ",", "'run'", ",", "'--rm'", ",", "'-v'", ",", "'{}:/data'", ".", "format", "(", "work_dir", ")", "]", "bwa_command", "=", "[", "\"jvivian/bwa\"", ",", "\"mem\"", ",", "\"-R\"", ",", "\"@RG\\tID:{0}\\tPL:Illumina\\tSM:{0}\\tLB:KapaHyper\"", ".", "format", "(", "uuid", ")", ",", "\"-T\"", ",", "str", "(", "0", ")", ",", "\"-t\"", ",", "str", "(", "cores", ")", ",", "\"/data/ref.fa\"", "]", "+", "[", "os", ".", "path", ".", "join", "(", "'/data/'", ",", "os", ".", "path", ".", "basename", "(", "x", ")", ")", "for", "x", "in", "urls", "]", "bamsort_command", "=", "[", "\"jeltje/biobambam\"", ",", "\"/usr/local/bin/bamsort\"", ",", "\"inputformat=sam\"", ",", "\"level=1\"", ",", "\"inputthreads={}\"", ".", "format", "(", "cores", ")", ",", "\"outputthreads={}\"", ".", "format", "(", "cores", ")", ",", "\"calmdnm=1\"", ",", "\"calmdnmrecompindetonly=1\"", ",", "\"calmdnmreference=/data/ref.fa\"", ",", "\"I=/data/{}\"", ".", "format", "(", "uuid", "+", "'.sam'", ")", "]", "# Piping the output to a file handle", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.sam'", ")", ",", "'w'", ")", "as", "f_out", ":", "subprocess", ".", "check_call", "(", "docker_cmd", "+", "bwa_command", ",", "stdout", "=", "f_out", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.bam'", ")", ",", "'w'", ")", "as", "f_out", ":", "subprocess", ".", "check_call", "(", "docker_cmd", "+", "bamsort_command", ",", "stdout", "=", "f_out", ")", "# Save in JobStore", "# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))", "ids", "[", "'bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.bam'", ")", ")", "# Copy file to S3", "if", "input_args", "[", "'s3_dir'", "]", ":", "job", ".", "addChildJobFn", "(", "upload_bam_to_s3", ",", "ids", ",", "input_args", ",", "sample", ",", "cores", "=", "32", ",", "memory", "=", "'20 G'", ",", "disk", "=", "'30 G'", ")", "# Move file in output_dir", "if", "input_args", "[", "'output_dir'", "]", ":", "move_to_output_dir", "(", "work_dir", ",", "output_dir", ",", "uuid", "=", "None", ",", "files", "=", "[", "uuid", "+", "'.bam'", "]", ")" ]
Runs BWA and then Bamsort on the supplied fastqs for this sample Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample
[ "Runs", "BWA", "and", "then", "Bamsort", "on", "the", "supplied", "fastqs", "for", "this", "sample" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L180-L238
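A sketch of the stdout-redirection pattern used in alignment(), where the container writes SAM output to stdout and Python captures it into a file. Here 'echo' stands in for the docker command so the snippet runs without Docker.

# Redirect a subprocess's stdout into a file, as alignment() does with docker.
import os
import subprocess
import tempfile

work_dir = tempfile.mkdtemp()
out_path = os.path.join(work_dir, 'sample.sam')
with open(out_path, 'w') as f_out:
    subprocess.check_call(['echo', '@HD\tVN:1.5'], stdout=f_out)
with open(out_path) as f_in:
    print(f_in.read().strip())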
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py
upload_bam_to_s3
def upload_bam_to_s3(job, ids, input_args, sample): """ Uploads output BAM from sample to S3 Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample """ uuid, urls = sample key_path = input_args['ssec'] work_dir = job.fileStore.getLocalTempDir() # Parse s3_dir to get bucket and s3 path s3_dir = input_args['s3_dir'] bucket_name = s3_dir.lstrip('/').split('/')[0] bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:]) base_url = 'https://s3-us-west-2.amazonaws.com/' url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam') #I/O job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam')) # Generate keyfile for upload with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out: f_out.write(generate_unique_key(key_path, url)) # Commands to upload to S3 via S3AM s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(work_dir, uuid + '.key'), 'file://{}'.format(os.path.join(work_dir, uuid + '.bam')), bucket_name, os.path.join(bucket_dir, uuid + '.bam')] subprocess.check_call(s3am_command)
python
def upload_bam_to_s3(job, ids, input_args, sample): """ Uploads output BAM from sample to S3 Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample """ uuid, urls = sample key_path = input_args['ssec'] work_dir = job.fileStore.getLocalTempDir() # Parse s3_dir to get bucket and s3 path s3_dir = input_args['s3_dir'] bucket_name = s3_dir.lstrip('/').split('/')[0] bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:]) base_url = 'https://s3-us-west-2.amazonaws.com/' url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam') #I/O job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam')) # Generate keyfile for upload with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out: f_out.write(generate_unique_key(key_path, url)) # Commands to upload to S3 via S3AM s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(work_dir, uuid + '.key'), 'file://{}'.format(os.path.join(work_dir, uuid + '.bam')), bucket_name, os.path.join(bucket_dir, uuid + '.bam')] subprocess.check_call(s3am_command)
[ "def", "upload_bam_to_s3", "(", "job", ",", "ids", ",", "input_args", ",", "sample", ")", ":", "uuid", ",", "urls", "=", "sample", "key_path", "=", "input_args", "[", "'ssec'", "]", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "# Parse s3_dir to get bucket and s3 path", "s3_dir", "=", "input_args", "[", "'s3_dir'", "]", "bucket_name", "=", "s3_dir", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "0", "]", "bucket_dir", "=", "'/'", ".", "join", "(", "s3_dir", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", ")", "base_url", "=", "'https://s3-us-west-2.amazonaws.com/'", "url", "=", "os", ".", "path", ".", "join", "(", "base_url", ",", "bucket_name", ",", "bucket_dir", ",", "uuid", "+", "'.bam'", ")", "#I/O", "job", ".", "fileStore", ".", "readGlobalFile", "(", "ids", "[", "'bam'", "]", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.bam'", ")", ")", "# Generate keyfile for upload", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.key'", ")", ",", "'wb'", ")", "as", "f_out", ":", "f_out", ".", "write", "(", "generate_unique_key", "(", "key_path", ",", "url", ")", ")", "# Commands to upload to S3 via S3AM", "s3am_command", "=", "[", "'s3am'", ",", "'upload'", ",", "'--sse-key-file'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.key'", ")", ",", "'file://{}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.bam'", ")", ")", ",", "bucket_name", ",", "os", ".", "path", ".", "join", "(", "bucket_dir", ",", "uuid", "+", "'.bam'", ")", "]", "subprocess", ".", "check_call", "(", "s3am_command", ")" ]
Uploads output BAM from sample to S3 Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample
[ "Uploads", "output", "BAM", "from", "sample", "to", "S3" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/old_alignment_script/batch_align.py#L241-L272
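A sketch of how upload_bam_to_s3 splits its s3_dir setting into a bucket name and key prefix before building the S3AM upload target; the path shown is an example value, not a real project bucket.

# Split '/bucket/prefix/...' into bucket name and key prefix.
import os

s3_dir = '/example-bucket/project/exome_bams'
bucket_name = s3_dir.lstrip('/').split('/')[0]
bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
url = os.path.join('https://s3-us-west-2.amazonaws.com/',
                   bucket_name, bucket_dir, 'sample-1.bam')
print(bucket_name)  # example-bucket
print(bucket_dir)   # project/exome_bams
print(url)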
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/vqsr.py
vqsr_pipeline
def vqsr_pipeline(job, uuid, vcf_id, config): """ Runs GATK Variant Quality Score Recalibration. 0: Start 0 --> 1 --> 3 --> 4 --> 5 1: Recalibrate SNPs | | 2: Recalibrate INDELS +-> 2 -+ 3: Apply SNP Recalibration 4: Apply INDEL Recalibration 5: Write VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix for output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption SNP VQSR attributes: config.snp_filter_annotations List of GATK variant annotations config.hapmap FileStoreID for HapMap resource file config.omni FileStoreID for Omni resource file config.dbsnp FileStoreID for dbSNP resource file config.g1k_snp FileStoreID for 1000G SNP resource file INDEL VQSR attributes: config.indel_filter_annotations List of GATK variant annotations config.dbsnp FileStoreID for dbSNP resource file config.mills FileStoreID for Mills resource file :return: SNP and INDEL VQSR VCF FileStoreID :rtype: str """ # Get the total size of the genome reference genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # The VariantRecalibator disk requirement depends on the input VCF, the resource files, # the genome reference files, and the output recalibration table, tranche file, and plots. # The sum of these output files are less than the input VCF. snp_resources = ['hapmap', 'omni', 'dbsnp', 'g1k_snp'] snp_resource_size = sum(getattr(config, resource).size for resource in snp_resources) snp_recal_disk = PromisedRequirement(lambda in_vcf, ref_size, resource_size: 2 * in_vcf.size + ref_size + resource_size, vcf_id, genome_ref_size, snp_resource_size) snp_recal = job.wrapJobFn(gatk_variant_recalibrator, 'SNP', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, get_short_annotations(config.snp_filter_annotations), hapmap=config.hapmap, omni=config.omni, phase=config.g1k_snp, dbsnp=config.dbsnp, unsafe_mode=config.unsafe_mode, disk=snp_recal_disk, cores=config.cores, memory=config.xmx) indel_resource_size = config.mills.size + config.dbsnp.size indel_recal_disk = PromisedRequirement(lambda in_vcf, ref_size, resource_size: 2 * in_vcf.size + ref_size + resource_size, vcf_id, genome_ref_size, indel_resource_size) indel_recal = job.wrapJobFn(gatk_variant_recalibrator, 'INDEL', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, get_short_annotations(config.indel_filter_annotations), dbsnp=config.dbsnp, mills=config.mills, unsafe_mode=config.unsafe_mode, disk=indel_recal_disk, cores=config.cores, memory=config.xmx) # The ApplyRecalibration disk requirement depends on the input VCF size, the variant # recalibration table, the tranche file, the genome reference file, and the output VCF. # This step labels variants as filtered, so the output VCF file should be slightly larger # than the input file. Estimate a 10% increase in the VCF file size. 
apply_snp_recal_disk = PromisedRequirement(lambda in_vcf, recal, tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size), vcf_id, snp_recal.rv(0), snp_recal.rv(1), genome_ref_size) apply_snp_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'SNP', vcf_id, snp_recal.rv(0), snp_recal.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, unsafe_mode=config.unsafe_mode, disk=apply_snp_recal_disk, cores=config.cores, memory=config.xmx) apply_indel_recal_disk = PromisedRequirement(lambda in_vcf, recal, tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size), vcf_id, indel_recal.rv(0), indel_recal.rv(1), genome_ref_size) apply_indel_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'INDEL', apply_snp_recal.rv(), indel_recal.rv(0), indel_recal.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, unsafe_mode=config.unsafe_mode, disk=apply_indel_recal_disk, cores=config.cores, memory=config.xmx) job.addChild(snp_recal) job.addChild(indel_recal) snp_recal.addChild(apply_snp_recal) indel_recal.addChild(apply_indel_recal) apply_snp_recal.addChild(apply_indel_recal) # Output recalibrated VCF output_dir = config.output_dir output_dir = os.path.join(output_dir, uuid) vqsr_name = '%s.vqsr%s.vcf' % (uuid, config.suffix) output_vqsr = job.wrapJobFn(output_file_job, vqsr_name, apply_indel_recal.rv(), output_dir, s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, apply_indel_recal.rv())) apply_indel_recal.addChild(output_vqsr) return apply_indel_recal.rv()
python
def vqsr_pipeline(job, uuid, vcf_id, config): """ Runs GATK Variant Quality Score Recalibration. 0: Start 0 --> 1 --> 3 --> 4 --> 5 1: Recalibrate SNPs | | 2: Recalibrate INDELS +-> 2 -+ 3: Apply SNP Recalibration 4: Apply INDEL Recalibration 5: Write VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix for output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption SNP VQSR attributes: config.snp_filter_annotations List of GATK variant annotations config.hapmap FileStoreID for HapMap resource file config.omni FileStoreID for Omni resource file config.dbsnp FileStoreID for dbSNP resource file config.g1k_snp FileStoreID for 1000G SNP resource file INDEL VQSR attributes: config.indel_filter_annotations List of GATK variant annotations config.dbsnp FileStoreID for dbSNP resource file config.mills FileStoreID for Mills resource file :return: SNP and INDEL VQSR VCF FileStoreID :rtype: str """ # Get the total size of the genome reference genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # The VariantRecalibator disk requirement depends on the input VCF, the resource files, # the genome reference files, and the output recalibration table, tranche file, and plots. # The sum of these output files are less than the input VCF. snp_resources = ['hapmap', 'omni', 'dbsnp', 'g1k_snp'] snp_resource_size = sum(getattr(config, resource).size for resource in snp_resources) snp_recal_disk = PromisedRequirement(lambda in_vcf, ref_size, resource_size: 2 * in_vcf.size + ref_size + resource_size, vcf_id, genome_ref_size, snp_resource_size) snp_recal = job.wrapJobFn(gatk_variant_recalibrator, 'SNP', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, get_short_annotations(config.snp_filter_annotations), hapmap=config.hapmap, omni=config.omni, phase=config.g1k_snp, dbsnp=config.dbsnp, unsafe_mode=config.unsafe_mode, disk=snp_recal_disk, cores=config.cores, memory=config.xmx) indel_resource_size = config.mills.size + config.dbsnp.size indel_recal_disk = PromisedRequirement(lambda in_vcf, ref_size, resource_size: 2 * in_vcf.size + ref_size + resource_size, vcf_id, genome_ref_size, indel_resource_size) indel_recal = job.wrapJobFn(gatk_variant_recalibrator, 'INDEL', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, get_short_annotations(config.indel_filter_annotations), dbsnp=config.dbsnp, mills=config.mills, unsafe_mode=config.unsafe_mode, disk=indel_recal_disk, cores=config.cores, memory=config.xmx) # The ApplyRecalibration disk requirement depends on the input VCF size, the variant # recalibration table, the tranche file, the genome reference file, and the output VCF. # This step labels variants as filtered, so the output VCF file should be slightly larger # than the input file. Estimate a 10% increase in the VCF file size. apply_snp_recal_disk = PromisedRequirement(lambda in_vcf, recal, tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size), vcf_id, snp_recal.rv(0), snp_recal.rv(1), genome_ref_size) apply_snp_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'SNP', vcf_id, snp_recal.rv(0), snp_recal.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, unsafe_mode=config.unsafe_mode, disk=apply_snp_recal_disk, cores=config.cores, memory=config.xmx) apply_indel_recal_disk = PromisedRequirement(lambda in_vcf, recal, tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size), vcf_id, indel_recal.rv(0), indel_recal.rv(1), genome_ref_size) apply_indel_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'INDEL', apply_snp_recal.rv(), indel_recal.rv(0), indel_recal.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, unsafe_mode=config.unsafe_mode, disk=apply_indel_recal_disk, cores=config.cores, memory=config.xmx) job.addChild(snp_recal) job.addChild(indel_recal) snp_recal.addChild(apply_snp_recal) indel_recal.addChild(apply_indel_recal) apply_snp_recal.addChild(apply_indel_recal) # Output recalibrated VCF output_dir = config.output_dir output_dir = os.path.join(output_dir, uuid) vqsr_name = '%s.vqsr%s.vcf' % (uuid, config.suffix) output_vqsr = job.wrapJobFn(output_file_job, vqsr_name, apply_indel_recal.rv(), output_dir, s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, apply_indel_recal.rv())) apply_indel_recal.addChild(output_vqsr) return apply_indel_recal.rv()
[ "def", "vqsr_pipeline", "(", "job", ",", "uuid", ",", "vcf_id", ",", "config", ")", ":", "# Get the total size of the genome reference", "genome_ref_size", "=", "config", ".", "genome_fasta", ".", "size", "+", "config", ".", "genome_fai", ".", "size", "+", "config", ".", "genome_dict", ".", "size", "# The VariantRecalibator disk requirement depends on the input VCF, the resource files,", "# the genome reference files, and the output recalibration table, tranche file, and plots.", "# The sum of these output files are less than the input VCF.", "snp_resources", "=", "[", "'hapmap'", ",", "'omni'", ",", "'dbsnp'", ",", "'g1k_snp'", "]", "snp_resource_size", "=", "sum", "(", "getattr", "(", "config", ",", "resource", ")", ".", "size", "for", "resource", "in", "snp_resources", ")", "snp_recal_disk", "=", "PromisedRequirement", "(", "lambda", "in_vcf", ",", "ref_size", ",", "resource_size", ":", "2", "*", "in_vcf", ".", "size", "+", "ref_size", "+", "resource_size", ",", "vcf_id", ",", "genome_ref_size", ",", "snp_resource_size", ")", "snp_recal", "=", "job", ".", "wrapJobFn", "(", "gatk_variant_recalibrator", ",", "'SNP'", ",", "vcf_id", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "get_short_annotations", "(", "config", ".", "snp_filter_annotations", ")", ",", "hapmap", "=", "config", ".", "hapmap", ",", "omni", "=", "config", ".", "omni", ",", "phase", "=", "config", ".", "g1k_snp", ",", "dbsnp", "=", "config", ".", "dbsnp", ",", "unsafe_mode", "=", "config", ".", "unsafe_mode", ",", "disk", "=", "snp_recal_disk", ",", "cores", "=", "config", ".", "cores", ",", "memory", "=", "config", ".", "xmx", ")", "indel_resource_size", "=", "config", ".", "mills", ".", "size", "+", "config", ".", "dbsnp", ".", "size", "indel_recal_disk", "=", "PromisedRequirement", "(", "lambda", "in_vcf", ",", "ref_size", ",", "resource_size", ":", "2", "*", "in_vcf", ".", "size", "+", "ref_size", "+", "resource_size", ",", "vcf_id", ",", "genome_ref_size", ",", "indel_resource_size", ")", "indel_recal", "=", "job", ".", "wrapJobFn", "(", "gatk_variant_recalibrator", ",", "'INDEL'", ",", "vcf_id", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "get_short_annotations", "(", "config", ".", "indel_filter_annotations", ")", ",", "dbsnp", "=", "config", ".", "dbsnp", ",", "mills", "=", "config", ".", "mills", ",", "unsafe_mode", "=", "config", ".", "unsafe_mode", ",", "disk", "=", "indel_recal_disk", ",", "cores", "=", "config", ".", "cores", ",", "memory", "=", "config", ".", "xmx", ")", "# The ApplyRecalibration disk requirement depends on the input VCF size, the variant", "# recalibration table, the tranche file, the genome reference file, and the output VCF.", "# This step labels variants as filtered, so the output VCF file should be slightly larger", "# than the input file. 
Estimate a 10% increase in the VCF file size.", "apply_snp_recal_disk", "=", "PromisedRequirement", "(", "lambda", "in_vcf", ",", "recal", ",", "tranche", ",", "ref_size", ":", "int", "(", "2.1", "*", "in_vcf", ".", "size", "+", "recal", ".", "size", "+", "tranche", ".", "size", "+", "ref_size", ")", ",", "vcf_id", ",", "snp_recal", ".", "rv", "(", "0", ")", ",", "snp_recal", ".", "rv", "(", "1", ")", ",", "genome_ref_size", ")", "apply_snp_recal", "=", "job", ".", "wrapJobFn", "(", "gatk_apply_variant_recalibration", ",", "'SNP'", ",", "vcf_id", ",", "snp_recal", ".", "rv", "(", "0", ")", ",", "snp_recal", ".", "rv", "(", "1", ")", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "unsafe_mode", "=", "config", ".", "unsafe_mode", ",", "disk", "=", "apply_snp_recal_disk", ",", "cores", "=", "config", ".", "cores", ",", "memory", "=", "config", ".", "xmx", ")", "apply_indel_recal_disk", "=", "PromisedRequirement", "(", "lambda", "in_vcf", ",", "recal", ",", "tranche", ",", "ref_size", ":", "int", "(", "2.1", "*", "in_vcf", ".", "size", "+", "recal", ".", "size", "+", "tranche", ".", "size", "+", "ref_size", ")", ",", "vcf_id", ",", "indel_recal", ".", "rv", "(", "0", ")", ",", "indel_recal", ".", "rv", "(", "1", ")", ",", "genome_ref_size", ")", "apply_indel_recal", "=", "job", ".", "wrapJobFn", "(", "gatk_apply_variant_recalibration", ",", "'INDEL'", ",", "apply_snp_recal", ".", "rv", "(", ")", ",", "indel_recal", ".", "rv", "(", "0", ")", ",", "indel_recal", ".", "rv", "(", "1", ")", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "unsafe_mode", "=", "config", ".", "unsafe_mode", ",", "disk", "=", "apply_indel_recal_disk", ",", "cores", "=", "config", ".", "cores", ",", "memory", "=", "config", ".", "xmx", ")", "job", ".", "addChild", "(", "snp_recal", ")", "job", ".", "addChild", "(", "indel_recal", ")", "snp_recal", ".", "addChild", "(", "apply_snp_recal", ")", "indel_recal", ".", "addChild", "(", "apply_indel_recal", ")", "apply_snp_recal", ".", "addChild", "(", "apply_indel_recal", ")", "# Output recalibrated VCF", "output_dir", "=", "config", ".", "output_dir", "output_dir", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "uuid", ")", "vqsr_name", "=", "'%s.vqsr%s.vcf'", "%", "(", "uuid", ",", "config", ".", "suffix", ")", "output_vqsr", "=", "job", ".", "wrapJobFn", "(", "output_file_job", ",", "vqsr_name", ",", "apply_indel_recal", ".", "rv", "(", ")", ",", "output_dir", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "PromisedRequirement", "(", "lambda", "x", ":", "x", ".", "size", ",", "apply_indel_recal", ".", "rv", "(", ")", ")", ")", "apply_indel_recal", ".", "addChild", "(", "output_vqsr", ")", "return", "apply_indel_recal", ".", "rv", "(", ")" ]
Runs GATK Variant Quality Score Recalibration. 0: Start 0 --> 1 --> 3 --> 4 --> 5 1: Recalibrate SNPs | | 2: Recalibrate INDELS +-> 2 -+ 3: Apply SNP Recalibration 4: Apply INDEL Recalibration 5: Write VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix for output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption SNP VQSR attributes: config.snp_filter_annotations List of GATK variant annotations config.hapmap FileStoreID for HapMap resource file config.omni FileStoreID for Omni resource file config.dbsnp FileStoreID for dbSNP resource file config.g1k_snp FileStoreID for 1000G SNP resource file INDEL VQSR attributes: config.indel_filter_annotations List of GATK variant annotations config.dbsnp FileStoreID for dbSNP resource file config.mills FileStoreID for Mills resource file :return: SNP and INDEL VQSR VCF FileStoreID :rtype: str
[ "Runs", "GATK", "Variant", "Quality", "Score", "Recalibration", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/vqsr.py#L12-L162
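A minimal sketch of the same diamond-shaped job wiring used by vqsr_pipeline, assuming only that Toil is installed (pip install toil). The recalibrate and apply_recalibration functions below are stand-ins for the GATK wrapper jobs imported by the real pipeline, and ./jobstore is a hypothetical scratch job-store path.

from toil.common import Toil
from toil.job import Job

def recalibrate(job, mode, vcf):
    # Stand-in for gatk_variant_recalibrator: pretend to emit a recal table and a tranches file.
    return mode + '.recal', mode + '.tranches'

def apply_recalibration(job, mode, vcf, recal, tranches):
    # Stand-in for gatk_apply_variant_recalibration: pretend to emit the filtered VCF.
    return '%s.%s.vqsr.vcf' % (vcf, mode.lower())

def pipeline(job, vcf):
    snp = job.wrapJobFn(recalibrate, 'SNP', vcf)
    indel = job.wrapJobFn(recalibrate, 'INDEL', vcf)
    apply_snp = job.wrapJobFn(apply_recalibration, 'SNP', vcf, snp.rv(0), snp.rv(1))
    apply_indel = job.wrapJobFn(apply_recalibration, 'INDEL', apply_snp.rv(), indel.rv(0), indel.rv(1))
    # Same diamond as the record above: both recalibration jobs run first, the SNP apply
    # step feeds the INDEL apply step, and the final VCF is promised back to the caller.
    job.addChild(snp)
    job.addChild(indel)
    snp.addChild(apply_snp)
    indel.addChild(apply_indel)
    apply_snp.addChild(apply_indel)
    return apply_indel.rv()

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./jobstore')
    with Toil(options) as workflow:
        print(workflow.start(Job.wrapJobFn(pipeline, 'raw.vcf')))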
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/vqsr.py
get_short_annotations
def get_short_annotations(annotations): """ Converts full GATK annotation name to the shortened version :param annotations: :return: """ # Annotations need to match VCF header short_name = {'QualByDepth': 'QD', 'FisherStrand': 'FS', 'StrandOddsRatio': 'SOR', 'ReadPosRankSumTest': 'ReadPosRankSum', 'MappingQualityRankSumTest': 'MQRankSum', 'RMSMappingQuality': 'MQ', 'InbreedingCoeff': 'ID'} short_annotations = [] for annotation in annotations: if annotation in short_name: annotation = short_name[annotation] short_annotations.append(annotation) return short_annotations
python
def get_short_annotations(annotations): """ Converts full GATK annotation name to the shortened version :param annotations: :return: """ # Annotations need to match VCF header short_name = {'QualByDepth': 'QD', 'FisherStrand': 'FS', 'StrandOddsRatio': 'SOR', 'ReadPosRankSumTest': 'ReadPosRankSum', 'MappingQualityRankSumTest': 'MQRankSum', 'RMSMappingQuality': 'MQ', 'InbreedingCoeff': 'ID'} short_annotations = [] for annotation in annotations: if annotation in short_name: annotation = short_name[annotation] short_annotations.append(annotation) return short_annotations
[ "def", "get_short_annotations", "(", "annotations", ")", ":", "# Annotations need to match VCF header", "short_name", "=", "{", "'QualByDepth'", ":", "'QD'", ",", "'FisherStrand'", ":", "'FS'", ",", "'StrandOddsRatio'", ":", "'SOR'", ",", "'ReadPosRankSumTest'", ":", "'ReadPosRankSum'", ",", "'MappingQualityRankSumTest'", ":", "'MQRankSum'", ",", "'RMSMappingQuality'", ":", "'MQ'", ",", "'InbreedingCoeff'", ":", "'ID'", "}", "short_annotations", "=", "[", "]", "for", "annotation", "in", "annotations", ":", "if", "annotation", "in", "short_name", ":", "annotation", "=", "short_name", "[", "annotation", "]", "short_annotations", ".", "append", "(", "annotation", ")", "return", "short_annotations" ]
Converts full GATK annotation name to the shortened version :param annotations: :return:
[ "Converts", "full", "GATK", "annotation", "name", "to", "the", "shortened", "version", ":", "param", "annotations", ":", ":", "return", ":" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/vqsr.py#L165-L185
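A quick usage sketch of get_short_annotations; it assumes the toil_scripts package is importable (otherwise the function can be copied verbatim from the record above). Names missing from the lookup table pass through unchanged.

from toil_scripts.gatk_germline.vqsr import get_short_annotations  # assumes the package is installed

annotations = ['QualByDepth', 'FisherStrand', 'DP', 'RMSMappingQuality']
print(get_short_annotations(annotations))
# ['QD', 'FS', 'DP', 'MQ']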
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py
parse_sra
def parse_sra(path_to_config): """ Parses genetorrent config file. Returns list of samples: [ [id1, id1 ], [id2, id2], ... ] Returns duplicate of ids to follow UUID/URL standard. """ samples = [] with open(path_to_config, 'r') as f: for line in f.readlines(): if not line.isspace(): samples.append(line.strip()) return samples
python
def parse_sra(path_to_config): """ Parses genetorrent config file. Returns list of samples: [ [id1, id1 ], [id2, id2], ... ] Returns duplicate of ids to follow UUID/URL standard. """ samples = [] with open(path_to_config, 'r') as f: for line in f.readlines(): if not line.isspace(): samples.append(line.strip()) return samples
[ "def", "parse_sra", "(", "path_to_config", ")", ":", "samples", "=", "[", "]", "with", "open", "(", "path_to_config", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "not", "line", ".", "isspace", "(", ")", ":", "samples", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "return", "samples" ]
Parses genetorrent config file. Returns list of samples: [ [id1, id1 ], [id2, id2], ... ] Returns duplicate of ids to follow UUID/URL standard.
[ "Parses", "genetorrent", "config", "file", ".", "Returns", "list", "of", "samples", ":", "[", "[", "id1", "id1", "]", "[", "id2", "id2", "]", "...", "]", "Returns", "duplicate", "of", "ids", "to", "follow", "UUID", "/", "URL", "standard", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py#L82-L92
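A small usage sketch of parse_sra with a throw-away accession list, assuming the function is in scope (imported or copied from the record above). The code returns one flat list with one entry per non-blank line.

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('SRR1000001\n\nSRR1000002\n')   # hypothetical accessions, blank line on purpose
    config_path = f.name

print(parse_sra(config_path))   # ['SRR1000001', 'SRR1000002'] -- blank lines are skipped
os.remove(config_path)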
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py
tarball_files
def tarball_files(work_dir, tar_name, uuid=None, files=None): """ Tars a group of files together into a tarball work_dir: str Current Working Directory tar_name: str Name of tarball uuid: str UUID to stamp files with files: str(s) List of filenames to place in the tarball from working directory """ with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out: for fname in files: if uuid: f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname) else: f_out.add(os.path.join(work_dir, fname), arcname=fname)
python
def tarball_files(work_dir, tar_name, uuid=None, files=None): """ Tars a group of files together into a tarball work_dir: str Current Working Directory tar_name: str Name of tarball uuid: str UUID to stamp files with files: str(s) List of filenames to place in the tarball from working directory """ with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out: for fname in files: if uuid: f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname) else: f_out.add(os.path.join(work_dir, fname), arcname=fname)
[ "def", "tarball_files", "(", "work_dir", ",", "tar_name", ",", "uuid", "=", "None", ",", "files", "=", "None", ")", ":", "with", "tarfile", ".", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "tar_name", ")", ",", "'w:gz'", ")", "as", "f_out", ":", "for", "fname", "in", "files", ":", "if", "uuid", ":", "f_out", ".", "add", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "arcname", "=", "uuid", "+", "'.'", "+", "fname", ")", "else", ":", "f_out", ".", "add", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "arcname", "=", "fname", ")" ]
Tars a group of files together into a tarball work_dir: str Current Working Directory tar_name: str Name of tarball uuid: str UUID to stamp files with files: str(s) List of filenames to place in the tarball from working directory
[ "Tars", "a", "group", "of", "files", "together", "into", "a", "tarball" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py#L95-L109
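A self-contained usage sketch of tarball_files in a temporary directory, assuming the function is in scope; it shows how the optional uuid is stamped onto the archive members.

import os
import tarfile
import tempfile

work_dir = tempfile.mkdtemp()
for name in ('R1.fastq', 'R2.fastq'):
    with open(os.path.join(work_dir, name), 'w') as f:
        f.write('@read1\nACGT\n+\nFFFF\n')

tarball_files(work_dir, tar_name='sample.tar.gz', uuid='sample-1', files=['R1.fastq', 'R2.fastq'])

with tarfile.open(os.path.join(work_dir, 'sample.tar.gz')) as tar:
    print(tar.getnames())   # ['sample-1.R1.fastq', 'sample-1.R2.fastq']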
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py
start_batch
def start_batch(job, input_args): """ This function will administer 5 jobs at a time then recursively call itself until subset is empty """ samples = parse_sra(input_args['sra']) # for analysis_id in samples: job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')
python
def start_batch(job, input_args): """ This function will administer 5 jobs at a time then recursively call itself until subset is empty """ samples = parse_sra(input_args['sra']) # for analysis_id in samples: job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')
[ "def", "start_batch", "(", "job", ",", "input_args", ")", ":", "samples", "=", "parse_sra", "(", "input_args", "[", "'sra'", "]", ")", "# for analysis_id in samples:", "job", ".", "addChildJobFn", "(", "download_and_transfer_sample", ",", "input_args", ",", "samples", ",", "cores", "=", "1", ",", "disk", "=", "'30'", ")" ]
This function will administer 5 jobs at a time then recursively call itself until subset is empty
[ "This", "function", "will", "administer", "5", "jobs", "at", "a", "time", "then", "recursively", "call", "itself", "until", "subset", "is", "empty" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py#L113-L119
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py
download_and_transfer_sample
def download_and_transfer_sample(job, input_args, samples): """ Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub """ if len(samples) > 1: a = samples[len(samples)/2:] b = samples[:len(samples)/2] job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G') job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G') else: analysis_id = samples[0] work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # Acquire dbgap_key shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc')) # Call to fastq-dump to pull down SRA files and convert to fastq if input_args['single_end']: parameters = [analysis_id] else: parameters = ['--split-files', analysis_id] docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1', work_dir=work_dir, tool_parameters=parameters, sudo=sudo) # Collect files and encapsulate into a tarball shutil.rmtree(os.path.join(work_dir, 'sra')) sample_name = analysis_id + '.tar.gz' if input_args['single_end']: r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))] tarball_files(work_dir, tar_name=sample_name, files=r) else: r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))] r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))] tarball_files(work_dir, tar_name=sample_name, files=r1 + r2) # Parse s3_dir to get bucket and s3 path key_path = input_args['ssec'] s3_dir = input_args['s3_dir'] bucket_name = s3_dir.lstrip('/').split('/')[0] base_url = 'https://s3-us-west-2.amazonaws.com/' url = os.path.join(base_url, bucket_name, sample_name) # Generate keyfile for upload with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out: f_out.write(generate_unique_key(key_path, url)) # Upload to S3 via S3AM s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(work_dir, 'temp.key'), 'file://{}'.format(os.path.join(work_dir, sample_name)), 's3://' + bucket_name + '/'] subprocess.check_call(s3am_command)
python
def download_and_transfer_sample(job, input_args, samples): """ Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub """ if len(samples) > 1: a = samples[len(samples)/2:] b = samples[:len(samples)/2] job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G') job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G') else: analysis_id = samples[0] work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # Acquire dbgap_key shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc')) # Call to fastq-dump to pull down SRA files and convert to fastq if input_args['single_end']: parameters = [analysis_id] else: parameters = ['--split-files', analysis_id] docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1', work_dir=work_dir, tool_parameters=parameters, sudo=sudo) # Collect files and encapsulate into a tarball shutil.rmtree(os.path.join(work_dir, 'sra')) sample_name = analysis_id + '.tar.gz' if input_args['single_end']: r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))] tarball_files(work_dir, tar_name=sample_name, files=r) else: r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))] r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))] tarball_files(work_dir, tar_name=sample_name, files=r1 + r2) # Parse s3_dir to get bucket and s3 path key_path = input_args['ssec'] s3_dir = input_args['s3_dir'] bucket_name = s3_dir.lstrip('/').split('/')[0] base_url = 'https://s3-us-west-2.amazonaws.com/' url = os.path.join(base_url, bucket_name, sample_name) # Generate keyfile for upload with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out: f_out.write(generate_unique_key(key_path, url)) # Upload to S3 via S3AM s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(work_dir, 'temp.key'), 'file://{}'.format(os.path.join(work_dir, sample_name)), 's3://' + bucket_name + '/'] subprocess.check_call(s3am_command)
[ "def", "download_and_transfer_sample", "(", "job", ",", "input_args", ",", "samples", ")", ":", "if", "len", "(", "samples", ")", ">", "1", ":", "a", "=", "samples", "[", "len", "(", "samples", ")", "/", "2", ":", "]", "b", "=", "samples", "[", ":", "len", "(", "samples", ")", "/", "2", "]", "job", ".", "addChildJobFn", "(", "download_and_transfer_sample", ",", "input_args", ",", "a", ",", "disk", "=", "'30G'", ")", "job", ".", "addChildJobFn", "(", "download_and_transfer_sample", ",", "input_args", ",", "b", ",", "disk", "=", "'30G'", ")", "else", ":", "analysis_id", "=", "samples", "[", "0", "]", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "sudo", "=", "input_args", "[", "'sudo'", "]", "# Acquire dbgap_key", "shutil", ".", "copy", "(", "input_args", "[", "'dbgap_key'", "]", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'dbgap.ngc'", ")", ")", "# Call to fastq-dump to pull down SRA files and convert to fastq", "if", "input_args", "[", "'single_end'", "]", ":", "parameters", "=", "[", "analysis_id", "]", "else", ":", "parameters", "=", "[", "'--split-files'", ",", "analysis_id", "]", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1'", ",", "work_dir", "=", "work_dir", ",", "tool_parameters", "=", "parameters", ",", "sudo", "=", "sudo", ")", "# Collect files and encapsulate into a tarball", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sra'", ")", ")", "sample_name", "=", "analysis_id", "+", "'.tar.gz'", "if", "input_args", "[", "'single_end'", "]", ":", "r", "=", "[", "os", ".", "path", ".", "basename", "(", "x", ")", "for", "x", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*.f*'", ")", ")", "]", "tarball_files", "(", "work_dir", ",", "tar_name", "=", "sample_name", ",", "files", "=", "r", ")", "else", ":", "r1", "=", "[", "os", ".", "path", ".", "basename", "(", "x", ")", "for", "x", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*_1*'", ")", ")", "]", "r2", "=", "[", "os", ".", "path", ".", "basename", "(", "x", ")", "for", "x", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*_2*'", ")", ")", "]", "tarball_files", "(", "work_dir", ",", "tar_name", "=", "sample_name", ",", "files", "=", "r1", "+", "r2", ")", "# Parse s3_dir to get bucket and s3 path", "key_path", "=", "input_args", "[", "'ssec'", "]", "s3_dir", "=", "input_args", "[", "'s3_dir'", "]", "bucket_name", "=", "s3_dir", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "0", "]", "base_url", "=", "'https://s3-us-west-2.amazonaws.com/'", "url", "=", "os", ".", "path", ".", "join", "(", "base_url", ",", "bucket_name", ",", "sample_name", ")", "# Generate keyfile for upload", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'temp.key'", ")", ",", "'wb'", ")", "as", "f_out", ":", "f_out", ".", "write", "(", "generate_unique_key", "(", "key_path", ",", "url", ")", ")", "# Upload to S3 via S3AM", "s3am_command", "=", "[", "'s3am'", ",", "'upload'", ",", "'--sse-key-file'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'temp.key'", ")", ",", "'file://{}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample_name", ")", ")", ",", "'s3://'", "+", "bucket_name", "+", "'/'", "]", "subprocess", ".", "check_call", "(", "s3am_command", ")" ]
Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub
[ "Downloads", "a", "sample", "from", "dbGaP", "via", "SRAToolKit", "then", "uses", "S3AM", "to", "transfer", "it", "to", "S3" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py#L122-L172
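The splitting logic in download_and_transfer_sample fans a sample list out into single-sample leaf jobs by repeatedly halving it. A plain-Python sketch of just that recursion (using // so it also runs on Python 3), without the download and upload work:

def split_batches(samples):
    # Halve the list until one sample remains, mirroring how download_and_transfer_sample
    # schedules a child job for each half before doing any real work.
    if len(samples) > 1:
        mid = len(samples) // 2
        return split_batches(samples[mid:]) + split_batches(samples[:mid])
    return samples

print(sorted(split_batches(['s1', 's2', 's3', 's4', 's5'])))
# ['s1', 's2', 's3', 's4', 's5'] -- every sample ends up in exactly one leaf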
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py
main
def main(): """ Transfer gTEX data from dbGaP (NCBI) to S3 """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'sra': args.sra, 'dbgap_key': args.dbgap_key, 'ssec': args.ssec, 's3_dir': args.s3_dir, 'single_end': args.single_end, 'sudo': args.sudo} # Sanity checks if args.ssec: assert os.path.isfile(args.ssec) if args.sra: assert os.path.isfile(args.sra) if args.dbgap_key: assert os.path.isfile(args.dbgap_key) # Start Pipeline Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args)
python
def main(): """ Transfer gTEX data from dbGaP (NCBI) to S3 """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'sra': args.sra, 'dbgap_key': args.dbgap_key, 'ssec': args.ssec, 's3_dir': args.s3_dir, 'single_end': args.single_end, 'sudo': args.sudo} # Sanity checks if args.ssec: assert os.path.isfile(args.ssec) if args.sra: assert os.path.isfile(args.sra) if args.dbgap_key: assert os.path.isfile(args.dbgap_key) # Start Pipeline Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to toil", "parser", "=", "build_parser", "(", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Store inputs from argparse", "inputs", "=", "{", "'sra'", ":", "args", ".", "sra", ",", "'dbgap_key'", ":", "args", ".", "dbgap_key", ",", "'ssec'", ":", "args", ".", "ssec", ",", "'s3_dir'", ":", "args", ".", "s3_dir", ",", "'single_end'", ":", "args", ".", "single_end", ",", "'sudo'", ":", "args", ".", "sudo", "}", "# Sanity checks", "if", "args", ".", "ssec", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "ssec", ")", "if", "args", ".", "sra", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "sra", ")", "if", "args", ".", "dbgap_key", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "dbgap_key", ")", "# Start Pipeline", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "start_batch", ",", "inputs", ")", ",", "args", ")" ]
Transfer gTEX data from dbGaP (NCBI) to S3
[ "Transfer", "gTEX", "data", "from", "dbGaP", "(", "NCBI", ")", "to", "S3" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py#L175-L198
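build_parser is not part of this record, so the flags below are illustrative guesses based on the inputs dictionary above; the point of the sketch is the parse-then-assert pattern that main uses before launching the workflow.

import argparse
import os

def build_parser():
    # Hypothetical, reduced stand-in for the pipeline's real build_parser().
    parser = argparse.ArgumentParser(description='Transfer GTEx data from dbGaP to S3')
    parser.add_argument('--sra', required=True, help='File listing SRA accessions, one per line')
    parser.add_argument('--dbgap_key', required=True, help='dbGaP .ngc repository key')
    parser.add_argument('--ssec', default=None, help='Master key file for SSE-C encryption')
    parser.add_argument('--s3_dir', required=True, help='Destination bucket/prefix on S3')
    return parser

if __name__ == '__main__':
    args = build_parser().parse_args()
    for path in (args.sra, args.dbgap_key, args.ssec):
        if path:
            assert os.path.isfile(path), '{} is not a file'.format(path)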
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/common.py
output_file_job
def output_file_job(job, filename, file_id, output_dir, s3_key_path=None): """ Uploads a file from the FileStore to an output directory on the local filesystem or S3. :param JobFunctionWrappingJob job: passed automatically by Toil :param str filename: basename for file :param str file_id: FileStoreID :param str output_dir: Amazon S3 URL or local path :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption :return: """ job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir)) work_dir = job.fileStore.getLocalTempDir() filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename)) if urlparse(output_dir).scheme == 's3': s3am_upload(job=job, fpath=os.path.join(work_dir, filepath), s3_dir=output_dir, s3_key_path=s3_key_path) elif os.path.exists(os.path.join(output_dir, filename)): job.fileStore.logToMaster("File already exists: {}".format(filename)) else: mkdir_p(output_dir) copy_files([filepath], output_dir)
python
def output_file_job(job, filename, file_id, output_dir, s3_key_path=None): """ Uploads a file from the FileStore to an output directory on the local filesystem or S3. :param JobFunctionWrappingJob job: passed automatically by Toil :param str filename: basename for file :param str file_id: FileStoreID :param str output_dir: Amazon S3 URL or local path :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption :return: """ job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir)) work_dir = job.fileStore.getLocalTempDir() filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename)) if urlparse(output_dir).scheme == 's3': s3am_upload(job=job, fpath=os.path.join(work_dir, filepath), s3_dir=output_dir, s3_key_path=s3_key_path) elif os.path.exists(os.path.join(output_dir, filename)): job.fileStore.logToMaster("File already exists: {}".format(filename)) else: mkdir_p(output_dir) copy_files([filepath], output_dir)
[ "def", "output_file_job", "(", "job", ",", "filename", ",", "file_id", ",", "output_dir", ",", "s3_key_path", "=", "None", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Writing {} to {}'", ".", "format", "(", "filename", ",", "output_dir", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "filepath", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "file_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "filename", ")", ")", "if", "urlparse", "(", "output_dir", ")", ".", "scheme", "==", "'s3'", ":", "s3am_upload", "(", "job", "=", "job", ",", "fpath", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "filepath", ")", ",", "s3_dir", "=", "output_dir", ",", "s3_key_path", "=", "s3_key_path", ")", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "output_dir", ",", "filename", ")", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "\"File already exists: {}\"", ".", "format", "(", "filename", ")", ")", "else", ":", "mkdir_p", "(", "output_dir", ")", "copy_files", "(", "[", "filepath", "]", ",", "output_dir", ")" ]
Uploads a file from the FileStore to an output directory on the local filesystem or S3. :param JobFunctionWrappingJob job: passed automatically by Toil :param str filename: basename for file :param str file_id: FileStoreID :param str output_dir: Amazon S3 URL or local path :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption :return:
[ "Uploads", "a", "file", "from", "the", "FileStore", "to", "an", "output", "directory", "on", "the", "local", "filesystem", "or", "S3", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/common.py#L10-L32
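A standalone sketch of the destination dispatch used by output_file_job (S3 URL versus local directory), with the S3AM upload replaced by a print so it runs anywhere; the example paths are hypothetical.

import os
import shutil
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def deliver(filepath, output_dir):
    # Mirror output_file_job's branching on the destination scheme.
    if urlparse(output_dir).scheme == 's3':
        print('would s3am-upload {} to {}'.format(filepath, output_dir))
    elif os.path.exists(os.path.join(output_dir, os.path.basename(filepath))):
        print('already present, skipping')
    else:
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        shutil.copy(filepath, output_dir)

deliver('/tmp/sample.vqsr.vcf', 's3://my-bucket/results')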
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
download_encrypted_file
def download_encrypted_file(job, input_args, name): """ Downloads encrypted files from S3 via header injection input_args: dict Input dictionary defined in main() name: str Symbolic name associated with file """ work_dir = job.fileStore.getLocalTempDir() key_path = input_args['ssec'] file_path = os.path.join(work_dir, name) url = input_args[name] with open(key_path, 'r') as f: key = f.read() if len(key) != 32: raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key)) key = generate_unique_key(key_path, url) encoded_key = base64.b64encode(key) encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()) h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256' h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key) h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5) try: subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path) return job.fileStore.writeGlobalFile(file_path)
python
def download_encrypted_file(job, input_args, name): """ Downloads encrypted files from S3 via header injection input_args: dict Input dictionary defined in main() name: str Symbolic name associated with file """ work_dir = job.fileStore.getLocalTempDir() key_path = input_args['ssec'] file_path = os.path.join(work_dir, name) url = input_args[name] with open(key_path, 'r') as f: key = f.read() if len(key) != 32: raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key)) key = generate_unique_key(key_path, url) encoded_key = base64.b64encode(key) encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()) h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256' h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key) h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5) try: subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path) return job.fileStore.writeGlobalFile(file_path)
[ "def", "download_encrypted_file", "(", "job", ",", "input_args", ",", "name", ")", ":", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "key_path", "=", "input_args", "[", "'ssec'", "]", "file_path", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "name", ")", "url", "=", "input_args", "[", "name", "]", "with", "open", "(", "key_path", ",", "'r'", ")", "as", "f", ":", "key", "=", "f", ".", "read", "(", ")", "if", "len", "(", "key", ")", "!=", "32", ":", "raise", "RuntimeError", "(", "'Invalid Key! Must be 32 bytes: {}'", ".", "format", "(", "key", ")", ")", "key", "=", "generate_unique_key", "(", "key_path", ",", "url", ")", "encoded_key", "=", "base64", ".", "b64encode", "(", "key", ")", "encoded_key_md5", "=", "base64", ".", "b64encode", "(", "hashlib", ".", "md5", "(", "key", ")", ".", "digest", "(", ")", ")", "h1", "=", "'x-amz-server-side-encryption-customer-algorithm:AES256'", "h2", "=", "'x-amz-server-side-encryption-customer-key:{}'", ".", "format", "(", "encoded_key", ")", "h3", "=", "'x-amz-server-side-encryption-customer-key-md5:{}'", ".", "format", "(", "encoded_key_md5", ")", "try", ":", "subprocess", ".", "check_call", "(", "[", "'curl'", ",", "'-fs'", ",", "'--retry'", ",", "'5'", ",", "'-H'", ",", "h1", ",", "'-H'", ",", "h2", ",", "'-H'", ",", "h3", ",", "url", ",", "'-o'", ",", "file_path", "]", ")", "except", "OSError", ":", "raise", "RuntimeError", "(", "'Failed to find \"curl\". Install via \"apt-get install curl\"'", ")", "assert", "os", ".", "path", ".", "exists", "(", "file_path", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "file_path", ")" ]
Downloads encrypted files from S3 via header injection input_args: dict Input dictionary defined in main() name: str Symbolic name associated with file
[ "Downloads", "encrypted", "files", "from", "S3", "via", "header", "injection" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L177-L206
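The header injection in download_encrypted_file is the standard S3 SSE-C recipe: base64-encode the 32-byte key and the MD5 digest of the key. A Python 3 sketch of just that piece, using a throw-away constant key:

import base64
import hashlib

def ssec_headers(key):
    # Build the three SSE-C headers that download_encrypted_file passes to curl.
    assert len(key) == 32, 'SSE-C keys must be exactly 32 bytes'
    encoded_key = base64.b64encode(key).decode()
    encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()).decode()
    return ['x-amz-server-side-encryption-customer-algorithm:AES256',
            'x-amz-server-side-encryption-customer-key:' + encoded_key,
            'x-amz-server-side-encryption-customer-key-md5:' + encoded_key_md5]

for header in ssec_headers(b'0' * 32):   # constant key for demonstration only
    print(header)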
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
download_from_url
def download_from_url(job, url): """ Simple curl request made for a given url url: str URL to download """ work_dir = job.fileStore.getLocalTempDir() file_path = os.path.join(work_dir, os.path.basename(url)) if not os.path.exists(file_path): if url.startswith('s3:'): download_from_s3_url(file_path, url) else: try: subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path) return job.fileStore.writeGlobalFile(file_path)
python
def download_from_url(job, url): """ Simple curl request made for a given url url: str URL to download """ work_dir = job.fileStore.getLocalTempDir() file_path = os.path.join(work_dir, os.path.basename(url)) if not os.path.exists(file_path): if url.startswith('s3:'): download_from_s3_url(file_path, url) else: try: subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path) return job.fileStore.writeGlobalFile(file_path)
[ "def", "download_from_url", "(", "job", ",", "url", ")", ":", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "file_path", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "os", ".", "path", ".", "basename", "(", "url", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "if", "url", ".", "startswith", "(", "'s3:'", ")", ":", "download_from_s3_url", "(", "file_path", ",", "url", ")", "else", ":", "try", ":", "subprocess", ".", "check_call", "(", "[", "'curl'", ",", "'-fs'", ",", "'--retry'", ",", "'5'", ",", "'--create-dir'", ",", "url", ",", "'-o'", ",", "file_path", "]", ")", "except", "OSError", ":", "raise", "RuntimeError", "(", "'Failed to find \"curl\". Install via \"apt-get install curl\"'", ")", "assert", "os", ".", "path", ".", "exists", "(", "file_path", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "file_path", ")" ]
Simple curl request made for a given url url: str URL to download
[ "Simple", "curl", "request", "made", "for", "a", "given", "url" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L209-L226
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
docker_call
def docker_call(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False): """ Makes subprocess call of a command to a docker container. tool_parameters: list An array of the parameters to be passed to the tool tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools) java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G') outfile: file Filehandle that stderr will be passed to sudo: bool If the user wants the docker command executed as sudo """ base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split() if sudo: base_docker_call = ['sudo'] + base_docker_call if java_opts: base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)] try: if outfile: subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile) else: subprocess.check_call(base_docker_call + [tool] + tool_parameters) except subprocess.CalledProcessError: raise RuntimeError('docker command returned a non-zero exit status. Check error logs.') except OSError: raise RuntimeError('docker not found on system. Install on all nodes.')
python
def docker_call(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False): """ Makes subprocess call of a command to a docker container. tool_parameters: list An array of the parameters to be passed to the tool tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools) java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G') outfile: file Filehandle that stderr will be passed to sudo: bool If the user wants the docker command executed as sudo """ base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split() if sudo: base_docker_call = ['sudo'] + base_docker_call if java_opts: base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)] try: if outfile: subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile) else: subprocess.check_call(base_docker_call + [tool] + tool_parameters) except subprocess.CalledProcessError: raise RuntimeError('docker command returned a non-zero exit status. Check error logs.') except OSError: raise RuntimeError('docker not found on system. Install on all nodes.')
[ "def", "docker_call", "(", "work_dir", ",", "tool_parameters", ",", "tool", ",", "java_opts", "=", "None", ",", "outfile", "=", "None", ",", "sudo", "=", "False", ")", ":", "base_docker_call", "=", "'docker run --log-driver=none --rm -v {}:/data'", ".", "format", "(", "work_dir", ")", ".", "split", "(", ")", "if", "sudo", ":", "base_docker_call", "=", "[", "'sudo'", "]", "+", "base_docker_call", "if", "java_opts", ":", "base_docker_call", "=", "base_docker_call", "+", "[", "'-e'", ",", "'JAVA_OPTS={}'", ".", "format", "(", "java_opts", ")", "]", "try", ":", "if", "outfile", ":", "subprocess", ".", "check_call", "(", "base_docker_call", "+", "[", "tool", "]", "+", "tool_parameters", ",", "stdout", "=", "outfile", ")", "else", ":", "subprocess", ".", "check_call", "(", "base_docker_call", "+", "[", "tool", "]", "+", "tool_parameters", ")", "except", "subprocess", ".", "CalledProcessError", ":", "raise", "RuntimeError", "(", "'docker command returned a non-zero exit status. Check error logs.'", ")", "except", "OSError", ":", "raise", "RuntimeError", "(", "'docker not found on system. Install on all nodes.'", ")" ]
Makes subprocess call of a command to a docker container. tool_parameters: list An array of the parameters to be passed to the tool tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools) java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G') outfile: file Filehandle that stderr will be passed to sudo: bool If the user wants the docker command executed as sudo
[ "Makes", "subprocess", "call", "of", "a", "command", "to", "a", "docker", "container", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L257-L281
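A sketch that only assembles and prints the argv list built by docker_call, so it runs without Docker present; the image name and parameters are illustrative.

def build_docker_command(work_dir, tool, tool_parameters, java_opts=None, sudo=False):
    # Mirror docker_call: mount work_dir at /data, optionally prefix sudo and pass JAVA_OPTS.
    base = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if sudo:
        base = ['sudo'] + base
    if java_opts:
        base += ['-e', 'JAVA_OPTS={}'.format(java_opts)]
    return base + [tool] + tool_parameters

print(build_docker_command('/tmp/work', 'quay.io/ucsc_cgl/samtools', ['faidx', 'ref.fa'], java_opts='-Xmx4G'))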
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
copy_to_output_dir
def copy_to_output_dir(work_dir, output_dir, uuid=None, files=list()): """ A list of files to move from work_dir to output_dir. work_dir: str Current working directory output_dir: str Output directory for files to go uuid: str UUID to "stamp" onto output files files: list List of files to iterate through """ for fname in files: if uuid is None: shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, fname)) else: shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
python
def copy_to_output_dir(work_dir, output_dir, uuid=None, files=list()): """ A list of files to move from work_dir to output_dir. work_dir: str Current working directory output_dir: str Output directory for files to go uuid: str UUID to "stamp" onto output files files: list List of files to iterate through """ for fname in files: if uuid is None: shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, fname)) else: shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
[ "def", "copy_to_output_dir", "(", "work_dir", ",", "output_dir", ",", "uuid", "=", "None", ",", "files", "=", "list", "(", ")", ")", ":", "for", "fname", "in", "files", ":", "if", "uuid", "is", "None", ":", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "os", ".", "path", ".", "join", "(", "output_dir", ",", "fname", ")", ")", "else", ":", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'{}.{}'", ".", "format", "(", "uuid", ",", "fname", ")", ")", ")" ]
A list of files to move from work_dir to output_dir. work_dir: str Current working directory output_dir: str Output directory for files to go uuid: str UUID to "stamp" onto output files files: list List of files to iterate through
[ "A", "list", "of", "files", "to", "move", "from", "work_dir", "to", "output_dir", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L284-L297
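A short usage sketch of copy_to_output_dir with temporary directories, assuming the function is in scope; it shows the uuid prefix being applied to the copied file.

import os
import tempfile

work_dir = tempfile.mkdtemp()
output_dir = tempfile.mkdtemp()
with open(os.path.join(work_dir, 'report.txt'), 'w') as f:
    f.write('done\n')

copy_to_output_dir(work_dir, output_dir, uuid='sample-1', files=['report.txt'])
print(os.listdir(output_dir))   # ['sample-1.report.txt']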
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
program_checks
def program_checks(job, input_args): """ Checks that dependency programs are installed. input_args: dict Dictionary of input arguments (from main()) """ # Program checks for program in ['curl', 'docker', 'unzip', 'samtools']: assert which(program), 'Program "{}" must be installed on every node.'.format(program) job.addChildJobFn(download_shared_files, input_args)
python
def program_checks(job, input_args): """ Checks that dependency programs are installed. input_args: dict Dictionary of input arguments (from main()) """ # Program checks for program in ['curl', 'docker', 'unzip', 'samtools']: assert which(program), 'Program "{}" must be installed on every node.'.format(program) job.addChildJobFn(download_shared_files, input_args)
[ "def", "program_checks", "(", "job", ",", "input_args", ")", ":", "# Program checks", "for", "program", "in", "[", "'curl'", ",", "'docker'", ",", "'unzip'", ",", "'samtools'", "]", ":", "assert", "which", "(", "program", ")", ",", "'Program \"{}\" must be installed on every node.'", ".", "format", "(", "program", ")", "job", ".", "addChildJobFn", "(", "download_shared_files", ",", "input_args", ")" ]
Checks that dependency programs are installed. input_args: dict Dictionary of input arguments (from main())
[ "Checks", "that", "dependency", "programs", "are", "installed", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L334-L343
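The which helper used by program_checks comes from the pipeline's utility code; with the standard library (Python 3.3+) a rough equivalent of the same dependency check looks like the sketch below, printing rather than asserting so it runs on any machine.

import shutil

for program in ('curl', 'docker', 'unzip', 'samtools'):
    path = shutil.which(program)
    print('{}: {}'.format(program, path if path else 'NOT FOUND'))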
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
download_shared_files
def download_shared_files(job, input_args): """ Downloads and stores shared inputs files in the FileStore input_args: dict Dictionary of input arguments (from main()) """ shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed', 'normalize.pl', 'rsem_ref.zip', 'ebwt.zip', 'chromosomes.zip'] shared_ids = {} for f in shared_files: shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv() if input_args['config'] or input_args['config_fastq']: job.addFollowOnJobFn(parse_config_file, shared_ids, input_args) else: sample_path = input_args['input'] uuid = os.path.splitext(os.path.basename(sample_path))[0] sample = (uuid, sample_path) job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample)
python
def download_shared_files(job, input_args): """ Downloads and stores shared inputs files in the FileStore input_args: dict Dictionary of input arguments (from main()) """ shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed', 'normalize.pl', 'rsem_ref.zip', 'ebwt.zip', 'chromosomes.zip'] shared_ids = {} for f in shared_files: shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv() if input_args['config'] or input_args['config_fastq']: job.addFollowOnJobFn(parse_config_file, shared_ids, input_args) else: sample_path = input_args['input'] uuid = os.path.splitext(os.path.basename(sample_path))[0] sample = (uuid, sample_path) job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample)
[ "def", "download_shared_files", "(", "job", ",", "input_args", ")", ":", "shared_files", "=", "[", "'unc.bed'", ",", "'hg19.transcripts.fa'", ",", "'composite_exons.bed'", ",", "'normalize.pl'", ",", "'rsem_ref.zip'", ",", "'ebwt.zip'", ",", "'chromosomes.zip'", "]", "shared_ids", "=", "{", "}", "for", "f", "in", "shared_files", ":", "shared_ids", "[", "f", "]", "=", "job", ".", "addChildJobFn", "(", "download_from_url", ",", "input_args", "[", "f", "]", ")", ".", "rv", "(", ")", "if", "input_args", "[", "'config'", "]", "or", "input_args", "[", "'config_fastq'", "]", ":", "job", ".", "addFollowOnJobFn", "(", "parse_config_file", ",", "shared_ids", ",", "input_args", ")", "else", ":", "sample_path", "=", "input_args", "[", "'input'", "]", "uuid", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "sample_path", ")", ")", "[", "0", "]", "sample", "=", "(", "uuid", ",", "sample_path", ")", "job", ".", "addFollowOnJobFn", "(", "download_sample", ",", "shared_ids", ",", "input_args", ",", "sample", ")" ]
Downloads and stores shared inputs files in the FileStore input_args: dict Dictionary of input arguments (from main())
[ "Downloads", "and", "stores", "shared", "inputs", "files", "in", "the", "FileStore" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L346-L363
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
parse_config_file
def parse_config_file(job, ids, input_args): """ Launches pipeline for each sample. shared_ids: dict Dictionary of fileStore IDs input_args: dict Dictionary of input arguments """ samples = [] config = input_args['config'] with open(config, 'r') as f: for line in f.readlines(): if not line.isspace(): sample = line.strip().split(',') samples.append(sample) for sample in samples: job.addChildJobFn(download_sample, ids, input_args, sample)
python
def parse_config_file(job, ids, input_args): """ Launches pipeline for each sample. shared_ids: dict Dictionary of fileStore IDs input_args: dict Dictionary of input arguments """ samples = [] config = input_args['config'] with open(config, 'r') as f: for line in f.readlines(): if not line.isspace(): sample = line.strip().split(',') samples.append(sample) for sample in samples: job.addChildJobFn(download_sample, ids, input_args, sample)
[ "def", "parse_config_file", "(", "job", ",", "ids", ",", "input_args", ")", ":", "samples", "=", "[", "]", "config", "=", "input_args", "[", "'config'", "]", "with", "open", "(", "config", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "not", "line", ".", "isspace", "(", ")", ":", "sample", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "samples", ".", "append", "(", "sample", ")", "for", "sample", "in", "samples", ":", "job", ".", "addChildJobFn", "(", "download_sample", ",", "ids", ",", "input_args", ",", "sample", ")" ]
Launches pipeline for each sample. shared_ids: dict Dictionary of fileStore IDs input_args: dict Dictionary of input arguments
[ "Launches", "pipeline", "for", "each", "sample", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L366-L381
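The config consumed by parse_config_file is a plain comma-separated file. A throw-away sketch of the same parsing loop, showing the two row shapes that download_sample later distinguishes (uuid,tar versus uuid,R1,R2); the paths are hypothetical.

import os
import tempfile

rows = 'uuid-1,s3://bucket/sample1.tar\nuuid-2,/data/R1.fq.gz,/data/R2.fq.gz\n'
with tempfile.NamedTemporaryFile('w', suffix='.config', delete=False) as f:
    f.write(rows)
    config_path = f.name

samples = []
with open(config_path) as f:
    for line in f:
        if not line.isspace():
            samples.append(line.strip().split(','))
print(samples)
# [['uuid-1', 's3://bucket/sample1.tar'], ['uuid-2', '/data/R1.fq.gz', '/data/R2.fq.gz']]
os.remove(config_path)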
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
download_sample
def download_sample(job, ids, input_args, sample): """ Defines variables unique to a sample that are used in the rest of the pipelines ids: dict Dictionary of fileStore IDS input_args: dict Dictionary of input arguments sample: tuple Contains uuid and sample_url """ if len(sample) == 2: uuid, sample_location = sample url1, url2 = None, None else: uuid, url1, url2 = sample sample_location = None # Update values unique to sample sample_input = dict(input_args) sample_input['uuid'] = uuid sample_input['sample.tar'] = sample_location if sample_input['output_dir']: sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid) sample_input['cpu_count'] = multiprocessing.cpu_count() job_vars = (sample_input, ids) # Download or locate local file and place in the jobStore if sample_input['input']: ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location)) elif sample_input['config_fastq']: ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path) ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path) else: if sample_input['ssec']: ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv() else: ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv() job.addFollowOnJobFn(static_dag_launchpoint, job_vars)
python
def download_sample(job, ids, input_args, sample): """ Defines variables unique to a sample that are used in the rest of the pipelines ids: dict Dictionary of fileStore IDS input_args: dict Dictionary of input arguments sample: tuple Contains uuid and sample_url """ if len(sample) == 2: uuid, sample_location = sample url1, url2 = None, None else: uuid, url1, url2 = sample sample_location = None # Update values unique to sample sample_input = dict(input_args) sample_input['uuid'] = uuid sample_input['sample.tar'] = sample_location if sample_input['output_dir']: sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid) sample_input['cpu_count'] = multiprocessing.cpu_count() job_vars = (sample_input, ids) # Download or locate local file and place in the jobStore if sample_input['input']: ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location)) elif sample_input['config_fastq']: ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path) ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path) else: if sample_input['ssec']: ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv() else: ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv() job.addFollowOnJobFn(static_dag_launchpoint, job_vars)
[ "def", "download_sample", "(", "job", ",", "ids", ",", "input_args", ",", "sample", ")", ":", "if", "len", "(", "sample", ")", "==", "2", ":", "uuid", ",", "sample_location", "=", "sample", "url1", ",", "url2", "=", "None", ",", "None", "else", ":", "uuid", ",", "url1", ",", "url2", "=", "sample", "sample_location", "=", "None", "# Update values unique to sample", "sample_input", "=", "dict", "(", "input_args", ")", "sample_input", "[", "'uuid'", "]", "=", "uuid", "sample_input", "[", "'sample.tar'", "]", "=", "sample_location", "if", "sample_input", "[", "'output_dir'", "]", ":", "sample_input", "[", "'output_dir'", "]", "=", "os", ".", "path", ".", "join", "(", "input_args", "[", "'output_dir'", "]", ",", "uuid", ")", "sample_input", "[", "'cpu_count'", "]", "=", "multiprocessing", ".", "cpu_count", "(", ")", "job_vars", "=", "(", "sample_input", ",", "ids", ")", "# Download or locate local file and place in the jobStore", "if", "sample_input", "[", "'input'", "]", ":", "ids", "[", "'sample.tar'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "abspath", "(", "sample_location", ")", ")", "elif", "sample_input", "[", "'config_fastq'", "]", ":", "ids", "[", "'R1.fastq'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "urlparse", "(", "url1", ")", ".", "path", ")", "ids", "[", "'R2.fastq'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "urlparse", "(", "url2", ")", ".", "path", ")", "else", ":", "if", "sample_input", "[", "'ssec'", "]", ":", "ids", "[", "'sample.tar'", "]", "=", "job", ".", "addChildJobFn", "(", "download_encrypted_file", ",", "sample_input", ",", "'sample.tar'", ",", "disk", "=", "'25G'", ")", ".", "rv", "(", ")", "else", ":", "ids", "[", "'sample.tar'", "]", "=", "job", ".", "addChildJobFn", "(", "download_from_url", ",", "sample_input", "[", "'sample.tar'", "]", ",", "disk", "=", "'25G'", ")", ".", "rv", "(", ")", "job", ".", "addFollowOnJobFn", "(", "static_dag_launchpoint", ",", "job_vars", ")" ]
Defines variables unique to a sample that are used in the rest of the pipelines ids: dict Dictionary of fileStore IDS input_args: dict Dictionary of input arguments sample: tuple Contains uuid and sample_url
[ "Defines", "variables", "unique", "to", "a", "sample", "that", "are", "used", "in", "the", "rest", "of", "the", "pipelines" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L384-L417
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
static_dag_launchpoint
def static_dag_launchpoint(job, job_vars): """ Statically define jobs in the pipeline job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars if input_args['config_fastq']: cores = input_args['cpu_count'] a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate() else: a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate() b = job.wrapJobFn(consolidate_output, job_vars, a.rv()) # Take advantage of "encapsulate" to simplify pipeline wiring job.addChild(a) a.addChild(b)
python
def static_dag_launchpoint(job, job_vars): """ Statically define jobs in the pipeline job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars if input_args['config_fastq']: cores = input_args['cpu_count'] a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate() else: a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate() b = job.wrapJobFn(consolidate_output, job_vars, a.rv()) # Take advantage of "encapsulate" to simplify pipeline wiring job.addChild(a) a.addChild(b)
[ "def", "static_dag_launchpoint", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "if", "input_args", "[", "'config_fastq'", "]", ":", "cores", "=", "input_args", "[", "'cpu_count'", "]", "a", "=", "job", ".", "wrapJobFn", "(", "mapsplice", ",", "job_vars", ",", "cores", "=", "cores", ",", "disk", "=", "'130G'", ")", ".", "encapsulate", "(", ")", "else", ":", "a", "=", "job", ".", "wrapJobFn", "(", "merge_fastqs", ",", "job_vars", ",", "disk", "=", "'70 G'", ")", ".", "encapsulate", "(", ")", "b", "=", "job", ".", "wrapJobFn", "(", "consolidate_output", ",", "job_vars", ",", "a", ".", "rv", "(", ")", ")", "# Take advantage of \"encapsulate\" to simplify pipeline wiring", "job", ".", "addChild", "(", "a", ")", "a", ".", "addChild", "(", "b", ")" ]
Statically define jobs in the pipeline job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Statically", "define", "jobs", "in", "the", "pipeline" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L420-L435
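
The wrap-encapsulate-chain pattern in static_dag_launchpoint is how this pipeline declares its DAG up front. Below is a minimal, self-contained sketch of that same pattern, not part of the pipeline itself: it assumes the older Toil API this script was written against (Job.Runner.startToil, job.fileStore.logToMaster), and the step functions and message argument are purely illustrative.

from toil.job import Job

def step_one(job, msg):
    # Any children or follow-ons added here stay hidden behind the encapsulating job
    job.fileStore.logToMaster('step one: ' + msg)

def step_two(job, msg):
    job.fileStore.logToMaster('step two: ' + msg)

def launchpoint(job, msg):
    a = job.wrapJobFn(step_one, msg).encapsulate()
    b = job.wrapJobFn(step_two, msg)
    job.addChild(a)
    a.addChild(b)  # b starts only after a *and everything a spawns* have finished

if __name__ == '__main__':
    parser = Job.Runner.getDefaultArgumentParser()
    options = parser.parse_args()
    Job.Runner.startToil(Job.wrapJobFn(launchpoint, 'hello'), options)
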
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
merge_fastqs
def merge_fastqs(job, job_vars): """ Unzips input sample and concats the Read1 and Read2 groups together. job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] single_end_reads = input_args['single_end_reads'] # I/O sample = return_input_paths(job, work_dir, ids, 'sample.tar') # Untar File # subprocess.check_call(['unzip', sample, '-d', work_dir]) subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir]) # Remove large files before creating concat versions. os.remove(os.path.join(work_dir, 'sample.tar')) # Zcat files in parallel if single_end_reads: files = sorted(glob.glob(os.path.join(work_dir, '*'))) with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: subprocess.check_call(['zcat'] + files, stdout=f1) # FileStore ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) else: r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*'))) r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*'))) with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2) p1.wait() p2.wait() # FileStore ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(ids['sample.tar']) # Spawn child job return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv()
python
def merge_fastqs(job, job_vars): """ Unzips input sample and concats the Read1 and Read2 groups together. job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] single_end_reads = input_args['single_end_reads'] # I/O sample = return_input_paths(job, work_dir, ids, 'sample.tar') # Untar File # subprocess.check_call(['unzip', sample, '-d', work_dir]) subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir]) # Remove large files before creating concat versions. os.remove(os.path.join(work_dir, 'sample.tar')) # Zcat files in parallel if single_end_reads: files = sorted(glob.glob(os.path.join(work_dir, '*'))) with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: subprocess.check_call(['zcat'] + files, stdout=f1) # FileStore ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) else: r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*'))) r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*'))) with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2) p1.wait() p2.wait() # FileStore ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(ids['sample.tar']) # Spawn child job return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv()
[ "def", "merge_fastqs", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "cores", "=", "input_args", "[", "'cpu_count'", "]", "single_end_reads", "=", "input_args", "[", "'single_end_reads'", "]", "# I/O", "sample", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'sample.tar'", ")", "# Untar File", "# subprocess.check_call(['unzip', sample, '-d', work_dir])", "subprocess", ".", "check_call", "(", "[", "'tar'", ",", "'-xvf'", ",", "sample", ",", "'-C'", ",", "work_dir", "]", ")", "# Remove large files before creating concat versions.", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sample.tar'", ")", ")", "# Zcat files in parallel", "if", "single_end_reads", ":", "files", "=", "sorted", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*'", ")", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ",", "'w'", ")", "as", "f1", ":", "subprocess", ".", "check_call", "(", "[", "'zcat'", "]", "+", "files", ",", "stdout", "=", "f1", ")", "# FileStore", "ids", "[", "'R1.fastq'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ")", "else", ":", "r1_files", "=", "sorted", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*R1*'", ")", ")", ")", "r2_files", "=", "sorted", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*R2*'", ")", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ",", "'w'", ")", "as", "f1", ":", "p1", "=", "subprocess", ".", "Popen", "(", "[", "'zcat'", "]", "+", "r1_files", ",", "stdout", "=", "f1", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2.fastq'", ")", ",", "'w'", ")", "as", "f2", ":", "p2", "=", "subprocess", ".", "Popen", "(", "[", "'zcat'", "]", "+", "r2_files", ",", "stdout", "=", "f2", ")", "p1", ".", "wait", "(", ")", "p2", ".", "wait", "(", ")", "# FileStore", "ids", "[", "'R1.fastq'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ")", "ids", "[", "'R2.fastq'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2.fastq'", ")", ")", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "ids", "[", "'sample.tar'", "]", ")", "# Spawn child job", "return", "job", ".", "addChildJobFn", "(", "mapsplice", ",", "job_vars", ",", "cores", "=", "cores", ",", "disk", "=", "'130 G'", ")", ".", "rv", "(", ")" ]
Unzips input sample and concats the Read1 and Read2 groups together. job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Unzips", "input", "sample", "and", "concats", "the", "Read1", "and", "Read2", "groups", "together", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L438-L476
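
The detail worth noting in merge_fastqs is that both zcat processes are started before either is waited on, so the R1 and R2 shards decompress concurrently. A stripped-down sketch of just that step, with a hypothetical file layout, no Toil involvement, and zcat assumed to be on PATH:

import glob
import os
import subprocess

def concat_gzipped_pairs(work_dir):
    # Collect per-lane gzipped shards for each read group (hypothetical naming scheme)
    r1_shards = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
    r2_shards = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
    with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1, \
         open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
        p1 = subprocess.Popen(['zcat'] + r1_shards, stdout=f1)
        p2 = subprocess.Popen(['zcat'] + r2_shards, stdout=f2)
        # Both decompressions run concurrently; wait for both before returning
        p1.wait()
        p2.wait()
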
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
mapsplice
def mapsplice(job, job_vars): """ Maps RNA-Seq reads to a reference genome. job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] files_to_delete = ['R1.fastq'] # I/O return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip') if single_end_reads: return_input_paths(job, work_dir, ids, 'R1.fastq') else: return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq') files_to_delete.extend(['R2.fastq']) for fname in ['chromosomes.zip', 'ebwt.zip']: subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir]) # Command and call parameters = ['-p', str(cores), '-s', '25', '--bam', '--min-map-len', '50', '-x', '/data/ebwt', '-c', '/data/chromosomes', '-1', '/data/R1.fastq', '-o', '/data'] if not single_end_reads: parameters.extend(['-2', '/data/R2.fastq']) docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameters, work_dir=work_dir, sudo=sudo) # Write to FileStore for fname in ['alignments.bam', 'stats.txt']: ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname)) for fname in files_to_delete: job.fileStore.deleteGlobalFile(ids[fname]) # Run child job # map_id = job.addChildJobFn(mapping_stats, job_vars).rv() if input_args['upload_bam_to_s3'] and input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, job_vars) output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv() return output_ids
python
def mapsplice(job, job_vars): """ Maps RNA-Seq reads to a reference genome. job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] files_to_delete = ['R1.fastq'] # I/O return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip') if single_end_reads: return_input_paths(job, work_dir, ids, 'R1.fastq') else: return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq') files_to_delete.extend(['R2.fastq']) for fname in ['chromosomes.zip', 'ebwt.zip']: subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir]) # Command and call parameters = ['-p', str(cores), '-s', '25', '--bam', '--min-map-len', '50', '-x', '/data/ebwt', '-c', '/data/chromosomes', '-1', '/data/R1.fastq', '-o', '/data'] if not single_end_reads: parameters.extend(['-2', '/data/R2.fastq']) docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameters, work_dir=work_dir, sudo=sudo) # Write to FileStore for fname in ['alignments.bam', 'stats.txt']: ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname)) for fname in files_to_delete: job.fileStore.deleteGlobalFile(ids[fname]) # Run child job # map_id = job.addChildJobFn(mapping_stats, job_vars).rv() if input_args['upload_bam_to_s3'] and input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, job_vars) output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv() return output_ids
[ "def", "mapsplice", "(", "job", ",", "job_vars", ")", ":", "# Unpack variables", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "cores", "=", "input_args", "[", "'cpu_count'", "]", "sudo", "=", "input_args", "[", "'sudo'", "]", "single_end_reads", "=", "input_args", "[", "'single_end_reads'", "]", "files_to_delete", "=", "[", "'R1.fastq'", "]", "# I/O", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'ebwt.zip'", ",", "'chromosomes.zip'", ")", "if", "single_end_reads", ":", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'R1.fastq'", ")", "else", ":", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'R1.fastq'", ",", "'R2.fastq'", ")", "files_to_delete", ".", "extend", "(", "[", "'R2.fastq'", "]", ")", "for", "fname", "in", "[", "'chromosomes.zip'", ",", "'ebwt.zip'", "]", ":", "subprocess", ".", "check_call", "(", "[", "'unzip'", ",", "'-o'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ",", "'-d'", ",", "work_dir", "]", ")", "# Command and call", "parameters", "=", "[", "'-p'", ",", "str", "(", "cores", ")", ",", "'-s'", ",", "'25'", ",", "'--bam'", ",", "'--min-map-len'", ",", "'50'", ",", "'-x'", ",", "'/data/ebwt'", ",", "'-c'", ",", "'/data/chromosomes'", ",", "'-1'", ",", "'/data/R1.fastq'", ",", "'-o'", ",", "'/data'", "]", "if", "not", "single_end_reads", ":", "parameters", ".", "extend", "(", "[", "'-2'", ",", "'/data/R2.fastq'", "]", ")", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "sudo", "=", "sudo", ")", "# Write to FileStore", "for", "fname", "in", "[", "'alignments.bam'", ",", "'stats.txt'", "]", ":", "ids", "[", "fname", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", ")", "for", "fname", "in", "files_to_delete", ":", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "ids", "[", "fname", "]", ")", "# Run child job", "# map_id = job.addChildJobFn(mapping_stats, job_vars).rv()", "if", "input_args", "[", "'upload_bam_to_s3'", "]", "and", "input_args", "[", "'s3_dir'", "]", ":", "job", ".", "addChildJobFn", "(", "upload_bam_to_s3", ",", "job_vars", ")", "output_ids", "=", "job", ".", "addChildJobFn", "(", "add_read_groups", ",", "job_vars", ",", "disk", "=", "'30 G'", ")", ".", "rv", "(", ")", "return", "output_ids" ]
Maps RNA-Seq reads to a reference genome. job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Maps", "RNA", "-", "Seq", "reads", "to", "a", "reference", "genome", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L479-L524
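
docker_call is a thin wrapper; for this step it reduces to the same docker run -v work_dir:/data IMAGE args... invocation that exon_count later assembles by hand. A hedged sketch of that underlying command, using placeholder paths and assuming the MapSplice image's entrypoint accepts these parameters directly (which is what docker_call relies on):

import subprocess

work_dir = '/tmp/mapsplice_work'   # placeholder; docker_call uses the job's local temp dir
image = 'quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e'
params = ['-p', '4', '-s', '25', '--bam', '--min-map-len', '50',
          '-x', '/data/ebwt', '-c', '/data/chromosomes',
          '-1', '/data/R1.fastq', '-2', '/data/R2.fastq', '-o', '/data']
# Mount the work directory at /data inside the container, then run the tool
subprocess.check_call(['docker', 'run', '-v', work_dir + ':/data', image] + params)
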
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
add_read_groups
def add_read_groups(job, job_vars): """ This function adds read groups to the headers job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O alignments = return_input_paths(job, work_dir, ids, 'alignments.bam') output = os.path.join(work_dir, 'rg_alignments.bam') # Command and callg parameter = ['AddOrReplaceReadGroups', 'INPUT={}'.format(docker_path(alignments)), 'OUTPUT={}'.format(docker_path(output)), 'RGSM={}'.format(input_args['uuid']), 'RGID={}'.format(input_args['uuid']), 'RGLB=TruSeq', 'RGPL=illumina', 'RGPU=barcode', 'VALIDATION_STRINGENCY=SILENT'] docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameter, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv()
python
def add_read_groups(job, job_vars): """ This function adds read groups to the headers job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O alignments = return_input_paths(job, work_dir, ids, 'alignments.bam') output = os.path.join(work_dir, 'rg_alignments.bam') # Command and callg parameter = ['AddOrReplaceReadGroups', 'INPUT={}'.format(docker_path(alignments)), 'OUTPUT={}'.format(docker_path(output)), 'RGSM={}'.format(input_args['uuid']), 'RGID={}'.format(input_args['uuid']), 'RGLB=TruSeq', 'RGPL=illumina', 'RGPU=barcode', 'VALIDATION_STRINGENCY=SILENT'] docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameter, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv()
[ "def", "add_read_groups", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "sudo", "=", "input_args", "[", "'sudo'", "]", "# I/O", "alignments", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'alignments.bam'", ")", "output", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rg_alignments.bam'", ")", "# Command and callg", "parameter", "=", "[", "'AddOrReplaceReadGroups'", ",", "'INPUT={}'", ".", "format", "(", "docker_path", "(", "alignments", ")", ")", ",", "'OUTPUT={}'", ".", "format", "(", "docker_path", "(", "output", ")", ")", ",", "'RGSM={}'", ".", "format", "(", "input_args", "[", "'uuid'", "]", ")", ",", "'RGID={}'", ".", "format", "(", "input_args", "[", "'uuid'", "]", ")", ",", "'RGLB=TruSeq'", ",", "'RGPL=illumina'", ",", "'RGPU=barcode'", ",", "'VALIDATION_STRINGENCY=SILENT'", "]", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e'", ",", "tool_parameters", "=", "parameter", ",", "work_dir", "=", "work_dir", ",", "sudo", "=", "sudo", ")", "# Write to FileStore", "ids", "[", "'rg_alignments.bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output", ")", "# Run child job", "return", "job", ".", "addChildJobFn", "(", "bamsort_and_index", ",", "job_vars", ",", "disk", "=", "'30 G'", ")", ".", "rv", "(", ")" ]
This function adds read groups to the headers job_vars: tuple Tuple of dictionaries: input_args and ids
[ "This", "function", "adds", "read", "groups", "to", "the", "headers" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L548-L575
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
bamsort_and_index
def bamsort_and_index(job, job_vars): """ Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam') output = os.path.join(work_dir, 'sorted.bam') # Command -- second argument is "Output Prefix" cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')] cmd2 = ['index', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd1, work_dir=work_dir, sudo=sudo) docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd2, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['sorted.bam'] = job.fileStore.writeGlobalFile(output) ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai')) # Run child job output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv() rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv() return rseq_id, output_ids
python
def bamsort_and_index(job, job_vars): """ Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam') output = os.path.join(work_dir, 'sorted.bam') # Command -- second argument is "Output Prefix" cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')] cmd2 = ['index', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd1, work_dir=work_dir, sudo=sudo) docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd2, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['sorted.bam'] = job.fileStore.writeGlobalFile(output) ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai')) # Run child job output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv() rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv() return rseq_id, output_ids
[ "def", "bamsort_and_index", "(", "job", ",", "job_vars", ")", ":", "# Unpack variables", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "sudo", "=", "input_args", "[", "'sudo'", "]", "# I/O", "rg_alignments", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'rg_alignments.bam'", ")", "output", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sorted.bam'", ")", "# Command -- second argument is \"Output Prefix\"", "cmd1", "=", "[", "'sort'", ",", "docker_path", "(", "rg_alignments", ")", ",", "docker_path", "(", "'sorted'", ")", "]", "cmd2", "=", "[", "'index'", ",", "docker_path", "(", "output", ")", "]", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e'", ",", "tool_parameters", "=", "cmd1", ",", "work_dir", "=", "work_dir", ",", "sudo", "=", "sudo", ")", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e'", ",", "tool_parameters", "=", "cmd2", ",", "work_dir", "=", "work_dir", ",", "sudo", "=", "sudo", ")", "# Write to FileStore", "ids", "[", "'sorted.bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output", ")", "ids", "[", "'sorted.bam.bai'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sorted.bam.bai'", ")", ")", "# Run child job", "output_ids", "=", "job", ".", "addChildJobFn", "(", "sort_bam_by_reference", ",", "job_vars", ",", "disk", "=", "'50 G'", ")", ".", "rv", "(", ")", "rseq_id", "=", "job", ".", "addChildJobFn", "(", "rseq_qc", ",", "job_vars", ",", "disk", "=", "'20 G'", ")", ".", "rv", "(", ")", "return", "rseq_id", ",", "output_ids" ]
Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Sorts", "bam", "file", "and", "produces", "index", "file" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L578-L604
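
Outside Docker, the two samtools calls above correspond to the legacy samtools 0.1.19 syntax pinned by the image tag, where sort takes an output prefix rather than an -o flag. A rough equivalent with hypothetical file names and samtools assumed to be on PATH:

import subprocess

# samtools 0.1.19: `sort` takes an output *prefix* and appends .bam itself
subprocess.check_call(['samtools', 'sort', 'rg_alignments.bam', 'sorted'])
# Index the coordinate-sorted BAM, producing sorted.bam.bai
subprocess.check_call(['samtools', 'index', 'sorted.bam'])
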
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
rseq_qc
def rseq_qc(job, job_vars): """ QC module: contains QC metrics and information about the BAM post alignment job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] # I/O return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') # Command docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid], work_dir=work_dir, sudo=sudo) # Write to FileStore output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f] tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz'))
python
def rseq_qc(job, job_vars): """ QC module: contains QC metrics and information about the BAM post alignment job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] # I/O return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') # Command docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid], work_dir=work_dir, sudo=sudo) # Write to FileStore output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f] tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz'))
[ "def", "rseq_qc", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "uuid", "=", "input_args", "[", "'uuid'", "]", "sudo", "=", "input_args", "[", "'sudo'", "]", "# I/O", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'sorted.bam'", ",", "'sorted.bam.bai'", ")", "# Command", "docker_call", "(", "tool", "=", "'jvivian/qc'", ",", "tool_parameters", "=", "[", "'/opt/cgl-docker-lib/RseqQC_v2.sh'", ",", "'/data/sorted.bam'", ",", "uuid", "]", ",", "work_dir", "=", "work_dir", ",", "sudo", "=", "sudo", ")", "# Write to FileStore", "output_files", "=", "[", "f", "for", "f", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*'", ")", ")", "if", "'sorted.bam'", "not", "in", "f", "]", "tarball_files", "(", "work_dir", ",", "tar_name", "=", "'qc.tar.gz'", ",", "uuid", "=", "None", ",", "files", "=", "output_files", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'qc.tar.gz'", ")", ")" ]
QC module: contains QC metrics and information about the BAM post alignment job_vars: tuple Tuple of dictionaries: input_args and ids
[ "QC", "module", ":", "contains", "QC", "metrics", "and", "information", "about", "the", "BAM", "post", "alignment" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L607-L625
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
sort_bam_by_reference
def sort_bam_by_reference(job, job_vars): """ Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() # I/O sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') output = os.path.join(work_dir, 'sort_by_ref.bam') # Call: Samtools ref_seqs = [] handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout for line in handle: if line.startswith("@SQ"): tmp = line.split("\t") chrom = tmp[1].split(":")[1] ref_seqs.append(chrom) handle.close() # Iterate through chromosomes to create mini-bams for chrom in ref_seqs: # job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam) cmd_view = ["samtools", "view", "-b", sorted_bam, chrom] cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)] p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE) subprocess.check_call(cmd_sort, stdin=p1.stdout) sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs] cmd = ["samtools", "cat", "-o", output] + sorted_files subprocess.check_call(cmd) # Write to FileStore ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output) rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv() exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv() return exon_id, rsem_id
python
def sort_bam_by_reference(job, job_vars): """ Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() # I/O sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') output = os.path.join(work_dir, 'sort_by_ref.bam') # Call: Samtools ref_seqs = [] handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout for line in handle: if line.startswith("@SQ"): tmp = line.split("\t") chrom = tmp[1].split(":")[1] ref_seqs.append(chrom) handle.close() # Iterate through chromosomes to create mini-bams for chrom in ref_seqs: # job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam) cmd_view = ["samtools", "view", "-b", sorted_bam, chrom] cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)] p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE) subprocess.check_call(cmd_sort, stdin=p1.stdout) sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs] cmd = ["samtools", "cat", "-o", output] + sorted_files subprocess.check_call(cmd) # Write to FileStore ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output) rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv() exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv() return exon_id, rsem_id
[ "def", "sort_bam_by_reference", "(", "job", ",", "job_vars", ")", ":", "# Unpack variables", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "# I/O", "sorted_bam", ",", "sorted_bai", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'sorted.bam'", ",", "'sorted.bam.bai'", ")", "output", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sort_by_ref.bam'", ")", "# Call: Samtools", "ref_seqs", "=", "[", "]", "handle", "=", "subprocess", ".", "Popen", "(", "[", "\"samtools\"", ",", "\"view\"", ",", "\"-H\"", ",", "sorted_bam", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "stdout", "for", "line", "in", "handle", ":", "if", "line", ".", "startswith", "(", "\"@SQ\"", ")", ":", "tmp", "=", "line", ".", "split", "(", "\"\\t\"", ")", "chrom", "=", "tmp", "[", "1", "]", ".", "split", "(", "\":\"", ")", "[", "1", "]", "ref_seqs", ".", "append", "(", "chrom", ")", "handle", ".", "close", "(", ")", "# Iterate through chromosomes to create mini-bams", "for", "chrom", "in", "ref_seqs", ":", "# job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam)", "cmd_view", "=", "[", "\"samtools\"", ",", "\"view\"", ",", "\"-b\"", ",", "sorted_bam", ",", "chrom", "]", "cmd_sort", "=", "[", "\"samtools\"", ",", "\"sort\"", ",", "\"-m\"", ",", "\"3000000000\"", ",", "\"-n\"", ",", "\"-\"", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "chrom", ")", "]", "p1", "=", "subprocess", ".", "Popen", "(", "cmd_view", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "subprocess", ".", "check_call", "(", "cmd_sort", ",", "stdin", "=", "p1", ".", "stdout", ")", "sorted_files", "=", "[", "os", ".", "path", ".", "join", "(", "work_dir", ",", "chrom", ")", "+", "'.bam'", "for", "chrom", "in", "ref_seqs", "]", "cmd", "=", "[", "\"samtools\"", ",", "\"cat\"", ",", "\"-o\"", ",", "output", "]", "+", "sorted_files", "subprocess", ".", "check_call", "(", "cmd", ")", "# Write to FileStore", "ids", "[", "'sort_by_ref.bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output", ")", "rsem_id", "=", "job", ".", "addChildJobFn", "(", "transcriptome", ",", "job_vars", ",", "disk", "=", "'30 G'", ",", "memory", "=", "'30 G'", ")", ".", "rv", "(", ")", "exon_id", "=", "job", ".", "addChildJobFn", "(", "exon_count", ",", "job_vars", ",", "disk", "=", "'30 G'", ")", ".", "rv", "(", ")", "return", "exon_id", ",", "rsem_id" ]
Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Sorts", "the", "bam", "by", "reference" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L628-L663
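
The header parsing in sort_bam_by_reference assumes the SN:<name> entry is always the second tab-separated field of each @SQ line. A slightly more defensive sketch of just that step (samtools on PATH assumed; the BAM path is hypothetical):

import subprocess

def reference_names(bam_path):
    # Dump only the BAM header and pick the sequence name out of each @SQ line
    header = subprocess.check_output(['samtools', 'view', '-H', bam_path])
    names = []
    for line in header.decode().splitlines():
        if line.startswith('@SQ'):
            for field in line.split('\t'):
                if field.startswith('SN:'):
                    names.append(field[len('SN:'):])  # e.g. 'chr1'
    return names
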
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
exon_count
def exon_count(job, job_vars): """ Produces exon counts job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] # I/O sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'normalize.pl', 'composite_exons.bed') # Command tool = 'jvivian/bedtools' cmd_1 = ['coverage', '-split', '-abam', docker_path(sort_by_ref), '-b', docker_path(composite_bed)] cmd_2 = ['perl', os.path.join(work_dir, 'normalize.pl'), sort_by_ref, composite_bed] popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool] if sudo: popen_docker = ['sudo'] + popen_docker p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant'), 'w') as f: subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f) p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE) p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE) p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f: subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f) # Create zip, upload to fileStore, and move to output_dir as a backup output_files = ['exon_quant.bed', 'exon_quant'] tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz'))
python
def exon_count(job, job_vars): """ Produces exon counts job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] # I/O sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'normalize.pl', 'composite_exons.bed') # Command tool = 'jvivian/bedtools' cmd_1 = ['coverage', '-split', '-abam', docker_path(sort_by_ref), '-b', docker_path(composite_bed)] cmd_2 = ['perl', os.path.join(work_dir, 'normalize.pl'), sort_by_ref, composite_bed] popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool] if sudo: popen_docker = ['sudo'] + popen_docker p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant'), 'w') as f: subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f) p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE) p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE) p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f: subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f) # Create zip, upload to fileStore, and move to output_dir as a backup output_files = ['exon_quant.bed', 'exon_quant'] tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz'))
[ "def", "exon_count", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "uuid", "=", "input_args", "[", "'uuid'", "]", "sudo", "=", "input_args", "[", "'sudo'", "]", "# I/O", "sort_by_ref", ",", "normalize_pl", ",", "composite_bed", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'sort_by_ref.bam'", ",", "'normalize.pl'", ",", "'composite_exons.bed'", ")", "# Command", "tool", "=", "'jvivian/bedtools'", "cmd_1", "=", "[", "'coverage'", ",", "'-split'", ",", "'-abam'", ",", "docker_path", "(", "sort_by_ref", ")", ",", "'-b'", ",", "docker_path", "(", "composite_bed", ")", "]", "cmd_2", "=", "[", "'perl'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'normalize.pl'", ")", ",", "sort_by_ref", ",", "composite_bed", "]", "popen_docker", "=", "[", "'docker'", ",", "'run'", ",", "'-v'", ",", "'{}:/data'", ".", "format", "(", "work_dir", ")", ",", "tool", "]", "if", "sudo", ":", "popen_docker", "=", "[", "'sudo'", "]", "+", "popen_docker", "p", "=", "subprocess", ".", "Popen", "(", "popen_docker", "+", "cmd_1", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'exon_quant'", ")", ",", "'w'", ")", "as", "f", ":", "subprocess", ".", "check_call", "(", "cmd_2", ",", "stdin", "=", "p", ".", "stdout", ",", "stdout", "=", "f", ")", "p1", "=", "subprocess", ".", "Popen", "(", "[", "'cat'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'exon_quant'", ")", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "p2", "=", "subprocess", ".", "Popen", "(", "[", "'tr'", ",", "'\":\"'", ",", "'\"\\t\"'", "]", ",", "stdin", "=", "p1", ".", "stdout", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "p3", "=", "subprocess", ".", "Popen", "(", "[", "'tr'", ",", "'\"-\"'", ",", "'\"\\t\"'", "]", ",", "stdin", "=", "p2", ".", "stdout", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'exon_quant.bed'", ")", ",", "'w'", ")", "as", "f", ":", "subprocess", ".", "check_call", "(", "[", "'cut'", ",", "'-f1-4'", "]", ",", "stdin", "=", "p3", ".", "stdout", ",", "stdout", "=", "f", ")", "# Create zip, upload to fileStore, and move to output_dir as a backup", "output_files", "=", "[", "'exon_quant.bed'", ",", "'exon_quant'", "]", "tarball_files", "(", "work_dir", ",", "tar_name", "=", "'exon.tar.gz'", ",", "uuid", "=", "uuid", ",", "files", "=", "output_files", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'exon.tar.gz'", ")", ")" ]
Produces exon counts job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Produces", "exon", "counts" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L666-L705
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
transcriptome
def transcriptome(job, job_vars): """ Creates a bam of just the transcriptome job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'unc.bed', 'hg19.transcripts.fa') output = os.path.join(work_dir, 'transcriptome.bam') # Command parameters = ['sam-xlate', '--bed', docker_path(bed), '--in', docker_path(sort_by_ref), '--order', docker_path(hg19_fa), '--out', docker_path(output), '--xgtag', '--reverse'] docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo) # Write to FileStore ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv()
python
def transcriptome(job, job_vars): """ Creates a bam of just the transcriptome job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'unc.bed', 'hg19.transcripts.fa') output = os.path.join(work_dir, 'transcriptome.bam') # Command parameters = ['sam-xlate', '--bed', docker_path(bed), '--in', docker_path(sort_by_ref), '--order', docker_path(hg19_fa), '--out', docker_path(output), '--xgtag', '--reverse'] docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo) # Write to FileStore ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv()
[ "def", "transcriptome", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "sudo", "=", "input_args", "[", "'sudo'", "]", "# I/O", "sort_by_ref", ",", "bed", ",", "hg19_fa", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'sort_by_ref.bam'", ",", "'unc.bed'", ",", "'hg19.transcripts.fa'", ")", "output", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'transcriptome.bam'", ")", "# Command", "parameters", "=", "[", "'sam-xlate'", ",", "'--bed'", ",", "docker_path", "(", "bed", ")", ",", "'--in'", ",", "docker_path", "(", "sort_by_ref", ")", ",", "'--order'", ",", "docker_path", "(", "hg19_fa", ")", ",", "'--out'", ",", "docker_path", "(", "output", ")", ",", "'--xgtag'", ",", "'--reverse'", "]", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "java_opts", "=", "'-Xmx30g'", ",", "sudo", "=", "sudo", ")", "# Write to FileStore", "ids", "[", "'transcriptome.bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output", ")", "# Run child job", "return", "job", ".", "addChildJobFn", "(", "filter_bam", ",", "job_vars", ",", "memory", "=", "'30G'", ",", "disk", "=", "'30G'", ")", ".", "rv", "(", ")" ]
Creates a bam of just the transcriptome job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Creates", "a", "bam", "of", "just", "the", "transcriptome" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L708-L734
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
filter_bam
def filter_bam(job, job_vars): """ Performs filtering on the transcriptome bam job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] # I/O transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam') output = os.path.join(work_dir, 'filtered.bam') # Command parameters = ['sam-filter', '--strip-indels', '--max-insert', '1000', '--mapq', '1', '--in', docker_path(transcriptome_bam), '--out', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo) # Write to FileStore ids['filtered.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv()
python
def filter_bam(job, job_vars): """ Performs filtering on the transcriptome bam job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] # I/O transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam') output = os.path.join(work_dir, 'filtered.bam') # Command parameters = ['sam-filter', '--strip-indels', '--max-insert', '1000', '--mapq', '1', '--in', docker_path(transcriptome_bam), '--out', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo) # Write to FileStore ids['filtered.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv()
[ "def", "filter_bam", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "cores", "=", "input_args", "[", "'cpu_count'", "]", "sudo", "=", "input_args", "[", "'sudo'", "]", "# I/O", "transcriptome_bam", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'transcriptome.bam'", ")", "output", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'filtered.bam'", ")", "# Command", "parameters", "=", "[", "'sam-filter'", ",", "'--strip-indels'", ",", "'--max-insert'", ",", "'1000'", ",", "'--mapq'", ",", "'1'", ",", "'--in'", ",", "docker_path", "(", "transcriptome_bam", ")", ",", "'--out'", ",", "docker_path", "(", "output", ")", "]", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "os", ".", "path", ".", "dirname", "(", "output", ")", ",", "java_opts", "=", "'-Xmx30g'", ",", "sudo", "=", "sudo", ")", "# Write to FileStore", "ids", "[", "'filtered.bam'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output", ")", "# Run child job", "return", "job", ".", "addChildJobFn", "(", "rsem", ",", "job_vars", ",", "cores", "=", "cores", ",", "disk", "=", "'30 G'", ")", ".", "rv", "(", ")" ]
Performs filtering on the transcriptome bam job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Performs", "filtering", "on", "the", "transcriptome", "bam" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L737-L762
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
rsem
def rsem(job, job_vars): """ Runs RSEM to produce counts job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cpus = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] # I/O filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip') subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir]) output_prefix = 'rsem' # Make tool call to Docker parameters = ['--quiet', '--no-qualities', '-p', str(cpus), '--forward-prob', '0.5', '--seed-length', '25', '--fragment-length-mean', '-1.0', '--bam', docker_path(filtered_bam)] if not single_end_reads: parameters.extend(['--paired-end']) parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix]) docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88', tool_parameters=parameters, work_dir=work_dir, sudo=sudo) os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.path.join(work_dir, 'rsem_gene.tab')) os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab')) # Write to FileStore ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab')) ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab')) # Run child jobs return job.addChildJobFn(rsem_postprocess, job_vars).rv()
python
def rsem(job, job_vars): """ Runs RSEM to produce counts job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cpus = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] # I/O filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip') subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir]) output_prefix = 'rsem' # Make tool call to Docker parameters = ['--quiet', '--no-qualities', '-p', str(cpus), '--forward-prob', '0.5', '--seed-length', '25', '--fragment-length-mean', '-1.0', '--bam', docker_path(filtered_bam)] if not single_end_reads: parameters.extend(['--paired-end']) parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix]) docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88', tool_parameters=parameters, work_dir=work_dir, sudo=sudo) os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.path.join(work_dir, 'rsem_gene.tab')) os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab')) # Write to FileStore ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab')) ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab')) # Run child jobs return job.addChildJobFn(rsem_postprocess, job_vars).rv()
[ "def", "rsem", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "cpus", "=", "input_args", "[", "'cpu_count'", "]", "sudo", "=", "input_args", "[", "'sudo'", "]", "single_end_reads", "=", "input_args", "[", "'single_end_reads'", "]", "# I/O", "filtered_bam", ",", "rsem_ref", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'filtered.bam'", ",", "'rsem_ref.zip'", ")", "subprocess", ".", "check_call", "(", "[", "'unzip'", ",", "'-o'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rsem_ref.zip'", ")", ",", "'-d'", ",", "work_dir", "]", ")", "output_prefix", "=", "'rsem'", "# Make tool call to Docker", "parameters", "=", "[", "'--quiet'", ",", "'--no-qualities'", ",", "'-p'", ",", "str", "(", "cpus", ")", ",", "'--forward-prob'", ",", "'0.5'", ",", "'--seed-length'", ",", "'25'", ",", "'--fragment-length-mean'", ",", "'-1.0'", ",", "'--bam'", ",", "docker_path", "(", "filtered_bam", ")", "]", "if", "not", "single_end_reads", ":", "parameters", ".", "extend", "(", "[", "'--paired-end'", "]", ")", "parameters", ".", "extend", "(", "[", "'/data/rsem_ref/hg19_M_rCRS_ref'", ",", "output_prefix", "]", ")", "docker_call", "(", "tool", "=", "'quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "sudo", "=", "sudo", ")", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "output_prefix", "+", "'.genes.results'", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rsem_gene.tab'", ")", ")", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "output_prefix", "+", "'.isoforms.results'", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rsem_isoform.tab'", ")", ")", "# Write to FileStore", "ids", "[", "'rsem_gene.tab'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rsem_gene.tab'", ")", ")", "ids", "[", "'rsem_isoform.tab'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rsem_isoform.tab'", ")", ")", "# Run child jobs", "return", "job", ".", "addChildJobFn", "(", "rsem_postprocess", ",", "job_vars", ")", ".", "rv", "(", ")" ]
Runs RSEM to produce counts job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Runs", "RSEM", "to", "produce", "counts" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L765-L800
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
consolidate_output
def consolidate_output(job, job_vars, output_ids): """ Combine the contents of separate zipped outputs into one via streaming job_vars: tuple Tuple of dictionaries: input_args and ids output_ids: tuple Nested tuple of all the output fileStore IDs """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] # Retrieve IDs rseq_id, exon_id, rsem_id = flatten(output_ids) # Retrieve output file paths to consolidate # map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz')) qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz')) exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz')) rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz')) # I/O out_tar = os.path.join(work_dir, uuid + '.tar.gz') # Consolidate separate tarballs with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out: for tar in [rsem_tar, exon_tar, qc_tar]: with tarfile.open(tar, 'r') as f_in: for tarinfo in f_in: with closing(f_in.extractfile(tarinfo)) as f_in_file: if tar == qc_tar: tarinfo.name = os.path.join(uuid, 'rseq_qc', os.path.basename(tarinfo.name)) else: tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name)) f_out.addfile(tarinfo, fileobj=f_in_file) # Move to output directory of selected if input_args['output_dir']: output_dir = input_args['output_dir'] mkdir_p(output_dir) copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz']) # Write output file to fileStore ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar) # If S3 bucket argument specified, upload to S3 if input_args['s3_dir']: job.addChildJobFn(upload_output_to_s3, job_vars)
python
def consolidate_output(job, job_vars, output_ids): """ Combine the contents of separate zipped outputs into one via streaming job_vars: tuple Tuple of dictionaries: input_args and ids output_ids: tuple Nested tuple of all the output fileStore IDs """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] # Retrieve IDs rseq_id, exon_id, rsem_id = flatten(output_ids) # Retrieve output file paths to consolidate # map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz')) qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz')) exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz')) rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz')) # I/O out_tar = os.path.join(work_dir, uuid + '.tar.gz') # Consolidate separate tarballs with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out: for tar in [rsem_tar, exon_tar, qc_tar]: with tarfile.open(tar, 'r') as f_in: for tarinfo in f_in: with closing(f_in.extractfile(tarinfo)) as f_in_file: if tar == qc_tar: tarinfo.name = os.path.join(uuid, 'rseq_qc', os.path.basename(tarinfo.name)) else: tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name)) f_out.addfile(tarinfo, fileobj=f_in_file) # Move to output directory of selected if input_args['output_dir']: output_dir = input_args['output_dir'] mkdir_p(output_dir) copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz']) # Write output file to fileStore ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar) # If S3 bucket argument specified, upload to S3 if input_args['s3_dir']: job.addChildJobFn(upload_output_to_s3, job_vars)
[ "def", "consolidate_output", "(", "job", ",", "job_vars", ",", "output_ids", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "uuid", "=", "input_args", "[", "'uuid'", "]", "# Retrieve IDs", "rseq_id", ",", "exon_id", ",", "rsem_id", "=", "flatten", "(", "output_ids", ")", "# Retrieve output file paths to consolidate", "# map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz'))", "qc_tar", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "rseq_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'qc.tar.gz'", ")", ")", "exon_tar", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "exon_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'exon.tar.gz'", ")", ")", "rsem_tar", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "rsem_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rsem.tar.gz'", ")", ")", "# I/O", "out_tar", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "uuid", "+", "'.tar.gz'", ")", "# Consolidate separate tarballs", "with", "tarfile", ".", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "out_tar", ")", ",", "'w:gz'", ")", "as", "f_out", ":", "for", "tar", "in", "[", "rsem_tar", ",", "exon_tar", ",", "qc_tar", "]", ":", "with", "tarfile", ".", "open", "(", "tar", ",", "'r'", ")", "as", "f_in", ":", "for", "tarinfo", "in", "f_in", ":", "with", "closing", "(", "f_in", ".", "extractfile", "(", "tarinfo", ")", ")", "as", "f_in_file", ":", "if", "tar", "==", "qc_tar", ":", "tarinfo", ".", "name", "=", "os", ".", "path", ".", "join", "(", "uuid", ",", "'rseq_qc'", ",", "os", ".", "path", ".", "basename", "(", "tarinfo", ".", "name", ")", ")", "else", ":", "tarinfo", ".", "name", "=", "os", ".", "path", ".", "join", "(", "uuid", ",", "os", ".", "path", ".", "basename", "(", "tarinfo", ".", "name", ")", ")", "f_out", ".", "addfile", "(", "tarinfo", ",", "fileobj", "=", "f_in_file", ")", "# Move to output directory of selected", "if", "input_args", "[", "'output_dir'", "]", ":", "output_dir", "=", "input_args", "[", "'output_dir'", "]", "mkdir_p", "(", "output_dir", ")", "copy_to_output_dir", "(", "work_dir", ",", "output_dir", ",", "uuid", "=", "None", ",", "files", "=", "[", "uuid", "+", "'.tar.gz'", "]", ")", "# Write output file to fileStore", "ids", "[", "'uuid.tar.gz'", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "out_tar", ")", "# If S3 bucket argument specified, upload to S3", "if", "input_args", "[", "'s3_dir'", "]", ":", "job", ".", "addChildJobFn", "(", "upload_output_to_s3", ",", "job_vars", ")" ]
Combine the contents of separate zipped outputs into one via streaming job_vars: tuple Tuple of dictionaries: input_args and ids output_ids: tuple Nested tuple of all the output fileStore IDs
[ "Combine", "the", "contents", "of", "separate", "zipped", "outputs", "into", "one", "via", "streaming" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L826-L865
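The streaming consolidation in consolidate_output above can be exercised outside of Toil. A minimal sketch, assuming the input tarballs already sit on local disk (merge_tarballs and the file names are illustrative, not part of the pipeline); it adds an isfile() guard so directory entries are skipped:

import os
import tarfile
from contextlib import closing

def merge_tarballs(out_path, uuid, tar_paths):
    # Stream members from each input tarball into one gzipped output,
    # re-rooting every member under the sample's uuid directory.
    with tarfile.open(out_path, 'w:gz') as f_out:
        for tar in tar_paths:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    if not tarinfo.isfile():
                        continue  # skip directories and other non-file members
                    with closing(f_in.extractfile(tarinfo)) as member:
                        tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=member)

# merge_tarballs('sample.tar.gz', 'sample-uuid', ['rsem.tar.gz', 'exon.tar.gz', 'qc.tar.gz'])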
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
upload_output_to_s3
def upload_output_to_s3(job, job_vars): """ If s3_dir is specified in arguments, file will be uploaded to S3 using boto. WARNING: ~/.boto credentials are necessary for this to succeed! job_vars: tuple Tuple of dictionaries: input_args and ids """ import boto from boto.s3.key import Key input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] # Parse s3_dir s3_dir = input_args['s3_dir'] bucket_name = s3_dir.split('/')[0] bucket_dir = '/'.join(s3_dir.split('/')[1:]) # I/O uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz') # Upload to S3 via boto conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) k = Key(bucket) k.key = os.path.join(bucket_dir, uuid + '.tar.gz') k.set_contents_from_filename(uuid_tar)
python
def upload_output_to_s3(job, job_vars): """ If s3_dir is specified in arguments, file will be uploaded to S3 using boto. WARNING: ~/.boto credentials are necessary for this to succeed! job_vars: tuple Tuple of dictionaries: input_args and ids """ import boto from boto.s3.key import Key input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] # Parse s3_dir s3_dir = input_args['s3_dir'] bucket_name = s3_dir.split('/')[0] bucket_dir = '/'.join(s3_dir.split('/')[1:]) # I/O uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz') # Upload to S3 via boto conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) k = Key(bucket) k.key = os.path.join(bucket_dir, uuid + '.tar.gz') k.set_contents_from_filename(uuid_tar)
[ "def", "upload_output_to_s3", "(", "job", ",", "job_vars", ")", ":", "import", "boto", "from", "boto", ".", "s3", ".", "key", "import", "Key", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "uuid", "=", "input_args", "[", "'uuid'", "]", "# Parse s3_dir", "s3_dir", "=", "input_args", "[", "'s3_dir'", "]", "bucket_name", "=", "s3_dir", ".", "split", "(", "'/'", ")", "[", "0", "]", "bucket_dir", "=", "'/'", ".", "join", "(", "s3_dir", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", ")", "# I/O", "uuid_tar", "=", "return_input_paths", "(", "job", ",", "work_dir", ",", "ids", ",", "'uuid.tar.gz'", ")", "# Upload to S3 via boto", "conn", "=", "boto", ".", "connect_s3", "(", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "k", "=", "Key", "(", "bucket", ")", "k", ".", "key", "=", "os", ".", "path", ".", "join", "(", "bucket_dir", ",", "uuid", "+", "'.tar.gz'", ")", "k", ".", "set_contents_from_filename", "(", "uuid_tar", ")" ]
If s3_dir is specified in arguments, file will be uploaded to S3 using boto. WARNING: ~/.boto credentials are necessary for this to succeed! job_vars: tuple Tuple of dictionaries: input_args and ids
[ "If", "s3_dir", "is", "specified", "in", "arguments", "file", "will", "be", "uploaded", "to", "S3", "using", "boto", ".", "WARNING", ":", "~", "/", ".", "boto", "credentials", "are", "necessary", "for", "this", "to", "succeed!" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L868-L892
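A stripped-down sketch of the boto upload used above; it assumes ~/.boto credentials are in place, and the bucket and key names in the usage line are placeholders:

import os
import boto
from boto.s3.key import Key

def upload_file_to_s3(local_path, bucket_name, bucket_dir):
    # Write a local file to s3://<bucket_name>/<bucket_dir>/<basename> via boto's key API.
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    k.key = os.path.join(bucket_dir, os.path.basename(local_path))
    k.set_contents_from_filename(local_path)

# upload_file_to_s3('/tmp/sample-uuid.tar.gz', 'my-bucket', 'rnaseq-output')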
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
upload_bam_to_s3
def upload_bam_to_s3(job, job_vars): """ Upload bam to S3. Requires S3AM and a ~/.boto config file. """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] # I/O job.fileStore.readGlobalFile(ids['alignments.bam'], os.path.join(work_dir, 'alignments.bam')) bam_path = os.path.join(work_dir, 'alignments.bam') sample_name = uuid + '.bam' # Parse s3_dir to get bucket and s3 path s3_dir = input_args['s3_dir'] bucket_name = s3_dir.split('/')[0] bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files') # Upload to S3 via S3AM s3am_command = ['s3am', 'upload', 'file://{}'.format(bam_path), os.path.join('s3://', bucket_name, bucket_dir, sample_name)] subprocess.check_call(s3am_command)
python
def upload_bam_to_s3(job, job_vars): """ Upload bam to S3. Requires S3AM and a ~/.boto config file. """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] # I/O job.fileStore.readGlobalFile(ids['alignments.bam'], os.path.join(work_dir, 'alignments.bam')) bam_path = os.path.join(work_dir, 'alignments.bam') sample_name = uuid + '.bam' # Parse s3_dir to get bucket and s3 path s3_dir = input_args['s3_dir'] bucket_name = s3_dir.split('/')[0] bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files') # Upload to S3 via S3AM s3am_command = ['s3am', 'upload', 'file://{}'.format(bam_path), os.path.join('s3://', bucket_name, bucket_dir, sample_name)] subprocess.check_call(s3am_command)
[ "def", "upload_bam_to_s3", "(", "job", ",", "job_vars", ")", ":", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "uuid", "=", "input_args", "[", "'uuid'", "]", "# I/O", "job", ".", "fileStore", ".", "readGlobalFile", "(", "ids", "[", "'alignments.bam'", "]", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignments.bam'", ")", ")", "bam_path", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignments.bam'", ")", "sample_name", "=", "uuid", "+", "'.bam'", "# Parse s3_dir to get bucket and s3 path", "s3_dir", "=", "input_args", "[", "'s3_dir'", "]", "bucket_name", "=", "s3_dir", ".", "split", "(", "'/'", ")", "[", "0", "]", "bucket_dir", "=", "os", ".", "path", ".", "join", "(", "'/'", ".", "join", "(", "s3_dir", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", ")", ",", "'bam_files'", ")", "# Upload to S3 via S3AM", "s3am_command", "=", "[", "'s3am'", ",", "'upload'", ",", "'file://{}'", ".", "format", "(", "bam_path", ")", ",", "os", ".", "path", ".", "join", "(", "'s3://'", ",", "bucket_name", ",", "bucket_dir", ",", "sample_name", ")", "]", "subprocess", ".", "check_call", "(", "s3am_command", ")" ]
Upload bam to S3. Requires S3AM and a ~/.boto config file.
[ "Upload", "bam", "to", "S3", ".", "Requires", "S3AM", "and", "a", "~", "/", ".", "boto", "config", "file", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L895-L915
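The S3AM call can be isolated into a small helper. This sketch assumes the s3am executable is on PATH and mirrors the exact command form used above; bucket and file names are placeholders:

import os
import subprocess

def s3am_upload(local_path, bucket_name, bucket_dir, dest_name):
    # S3AM performs the multipart upload of large BAMs to S3.
    cmd = ['s3am', 'upload',
           'file://{}'.format(local_path),
           os.path.join('s3://', bucket_name, bucket_dir, dest_name)]
    subprocess.check_call(cmd)

# s3am_upload('/tmp/alignments.bam', 'my-bucket', 'bam_files', 'sample-uuid.bam')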
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
main
def main(): """ This is a Toil pipeline for the UNC best practice RNA-Seq analysis. RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified. Please read the README.md located in the same directory. """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'config': args.config, 'config_fastq': args.config_fastq, 'input': args.input, 'unc.bed': args.unc, 'hg19.transcripts.fa': args.fasta, 'composite_exons.bed': args.composite_exons, 'normalize.pl': args.normalize, 'output_dir': args.output_dir, 'rsem_ref.zip': args.rsem_ref, 'chromosomes.zip': args.chromosomes, 'ebwt.zip': args.ebwt, 'ssec': args.ssec, 's3_dir': args.s3_dir, 'sudo': args.sudo, 'single_end_reads': args.single_end_reads, 'upload_bam_to_s3': args.upload_bam_to_s3, 'uuid': None, 'sample.tar': None, 'cpu_count': None} # Launch jobs Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)
python
def main(): """ This is a Toil pipeline for the UNC best practice RNA-Seq analysis. RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified. Please read the README.md located in the same directory. """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'config': args.config, 'config_fastq': args.config_fastq, 'input': args.input, 'unc.bed': args.unc, 'hg19.transcripts.fa': args.fasta, 'composite_exons.bed': args.composite_exons, 'normalize.pl': args.normalize, 'output_dir': args.output_dir, 'rsem_ref.zip': args.rsem_ref, 'chromosomes.zip': args.chromosomes, 'ebwt.zip': args.ebwt, 'ssec': args.ssec, 's3_dir': args.s3_dir, 'sudo': args.sudo, 'single_end_reads': args.single_end_reads, 'upload_bam_to_s3': args.upload_bam_to_s3, 'uuid': None, 'sample.tar': None, 'cpu_count': None} # Launch jobs Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to toil", "parser", "=", "build_parser", "(", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Store inputs from argparse", "inputs", "=", "{", "'config'", ":", "args", ".", "config", ",", "'config_fastq'", ":", "args", ".", "config_fastq", ",", "'input'", ":", "args", ".", "input", ",", "'unc.bed'", ":", "args", ".", "unc", ",", "'hg19.transcripts.fa'", ":", "args", ".", "fasta", ",", "'composite_exons.bed'", ":", "args", ".", "composite_exons", ",", "'normalize.pl'", ":", "args", ".", "normalize", ",", "'output_dir'", ":", "args", ".", "output_dir", ",", "'rsem_ref.zip'", ":", "args", ".", "rsem_ref", ",", "'chromosomes.zip'", ":", "args", ".", "chromosomes", ",", "'ebwt.zip'", ":", "args", ".", "ebwt", ",", "'ssec'", ":", "args", ".", "ssec", ",", "'s3_dir'", ":", "args", ".", "s3_dir", ",", "'sudo'", ":", "args", ".", "sudo", ",", "'single_end_reads'", ":", "args", ".", "single_end_reads", ",", "'upload_bam_to_s3'", ":", "args", ".", "upload_bam_to_s3", ",", "'uuid'", ":", "None", ",", "'sample.tar'", ":", "None", ",", "'cpu_count'", ":", "None", "}", "# Launch jobs", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "download_shared_files", ",", "inputs", ")", ",", "args", ")" ]
This is a Toil pipeline for the UNC best practice RNA-Seq analysis. RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified. Please read the README.md located in the same directory.
[ "This", "is", "a", "Toil", "pipeline", "for", "the", "UNC", "best", "practice", "RNA", "-", "Seq", "analysis", ".", "RNA", "-", "seq", "fastqs", "are", "combined", "aligned", "sorted", "filtered", "and", "quantified", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L918-L951
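The launch boilerplate above follows one pattern: build an argparse parser, let Toil add its own options, translate the namespace into an inputs dict, and start the root job. A generic sketch of that pattern (build_parser and root_job_fn are stand-ins for the pipeline's own functions, not part of it):

from toil.job import Job

def launch_pipeline(build_parser, root_job_fn):
    # Merge pipeline arguments with Toil's options, then hand the root job to Toil.
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # argparse Namespace -> plain dict; a real pipeline would pick fields explicitly,
    # since vars(args) also carries Toil's own options.
    inputs = vars(args)
    Job.Runner.startToil(Job.wrapJobFn(root_job_fn, inputs), args)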
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
remove_file
def remove_file(master_ip, filename, spark_on_toil): """ Remove the given file from hdfs with master at the given IP address :type masterIP: MasterAddress """ master_ip = master_ip.actual ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip] if spark_on_toil: output = check_output(ssh_call + ['docker', 'ps']) container_id = next(line.split()[0] for line in output.splitlines() if 'apache-hadoop-master' in line) ssh_call += ['docker', 'exec', container_id] try: check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename]) except: pass
python
def remove_file(master_ip, filename, spark_on_toil): """ Remove the given file from hdfs with master at the given IP address :type masterIP: MasterAddress """ master_ip = master_ip.actual ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip] if spark_on_toil: output = check_output(ssh_call + ['docker', 'ps']) container_id = next(line.split()[0] for line in output.splitlines() if 'apache-hadoop-master' in line) ssh_call += ['docker', 'exec', container_id] try: check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename]) except: pass
[ "def", "remove_file", "(", "master_ip", ",", "filename", ",", "spark_on_toil", ")", ":", "master_ip", "=", "master_ip", ".", "actual", "ssh_call", "=", "[", "'ssh'", ",", "'-o'", ",", "'StrictHostKeyChecking=no'", ",", "master_ip", "]", "if", "spark_on_toil", ":", "output", "=", "check_output", "(", "ssh_call", "+", "[", "'docker'", ",", "'ps'", "]", ")", "container_id", "=", "next", "(", "line", ".", "split", "(", ")", "[", "0", "]", "for", "line", "in", "output", ".", "splitlines", "(", ")", "if", "'apache-hadoop-master'", "in", "line", ")", "ssh_call", "+=", "[", "'docker'", ",", "'exec'", ",", "container_id", "]", "try", ":", "check_call", "(", "ssh_call", "+", "[", "'hdfs'", ",", "'dfs'", ",", "'-rm'", ",", "'-r'", ",", "'/'", "+", "filename", "]", ")", "except", ":", "pass" ]
Remove the given file from hdfs with master at the given IP address :type masterIP: MasterAddress
[ "Remove", "the", "given", "file", "from", "hdfs", "with", "master", "at", "the", "given", "IP", "address" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L52-L70
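A sketch of the same ssh/docker/hdfs plumbing with the bare except narrowed to CalledProcessError and the docker ps output decoded for Python 3; the host argument and the container match string mirror the function above:

from subprocess import CalledProcessError, check_call, check_output

def hdfs_rm(master_ip, path, spark_on_toil=False):
    # Run `hdfs dfs -rm -r` on the Spark master; when Spark runs on Toil, the
    # call is routed through the apache-hadoop-master container via docker exec.
    ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip]
    if spark_on_toil:
        output = check_output(ssh_call + ['docker', 'ps']).decode()
        container_id = next(line.split()[0] for line in output.splitlines()
                            if 'apache-hadoop-master' in line)
        ssh_call += ['docker', 'exec', container_id]
    try:
        check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + path])
    except CalledProcessError:
        pass  # best-effort cleanup: the path may already be gone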
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
download_data
def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam): """ Downloads input data files from S3. :type masterIP: MasterAddress """ log.info("Downloading known sites file %s to %s.", known_snps, hdfs_snps) call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory) log.info("Downloading input BAM %s to %s.", bam, hdfs_bam) call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory)
python
def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam): """ Downloads input data files from S3. :type masterIP: MasterAddress """ log.info("Downloading known sites file %s to %s.", known_snps, hdfs_snps) call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory) log.info("Downloading input BAM %s to %s.", bam, hdfs_bam) call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory)
[ "def", "download_data", "(", "job", ",", "master_ip", ",", "inputs", ",", "known_snps", ",", "bam", ",", "hdfs_snps", ",", "hdfs_bam", ")", ":", "log", ".", "info", "(", "\"Downloading known sites file %s to %s.\"", ",", "known_snps", ",", "hdfs_snps", ")", "call_conductor", "(", "job", ",", "master_ip", ",", "known_snps", ",", "hdfs_snps", ",", "memory", "=", "inputs", ".", "memory", ")", "log", ".", "info", "(", "\"Downloading input BAM %s to %s.\"", ",", "bam", ",", "hdfs_bam", ")", "call_conductor", "(", "job", ",", "master_ip", ",", "bam", ",", "hdfs_bam", ",", "memory", "=", "inputs", ".", "memory", ")" ]
Downloads input data files from S3. :type masterIP: MasterAddress
[ "Downloads", "input", "data", "files", "from", "S3", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L96-L107
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
adam_convert
def adam_convert(job, master_ip, inputs, in_file, in_snps, adam_file, adam_snps, spark_on_toil): """ Convert input sam/bam file and known SNPs file into ADAM format """ log.info("Converting input BAM to ADAM.") call_adam(job, master_ip, ["transform", in_file, adam_file], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) in_file_name = in_file.split("/")[-1] remove_file(master_ip, in_file_name, spark_on_toil) log.info("Converting known sites VCF to ADAM.") call_adam(job, master_ip, ["vcf2adam", "-only_variants", in_snps, adam_snps], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) in_snps_name = in_snps.split("/")[-1] remove_file(master_ip, in_snps_name, spark_on_toil)
python
def adam_convert(job, master_ip, inputs, in_file, in_snps, adam_file, adam_snps, spark_on_toil): """ Convert input sam/bam file and known SNPs file into ADAM format """ log.info("Converting input BAM to ADAM.") call_adam(job, master_ip, ["transform", in_file, adam_file], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) in_file_name = in_file.split("/")[-1] remove_file(master_ip, in_file_name, spark_on_toil) log.info("Converting known sites VCF to ADAM.") call_adam(job, master_ip, ["vcf2adam", "-only_variants", in_snps, adam_snps], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) in_snps_name = in_snps.split("/")[-1] remove_file(master_ip, in_snps_name, spark_on_toil)
[ "def", "adam_convert", "(", "job", ",", "master_ip", ",", "inputs", ",", "in_file", ",", "in_snps", ",", "adam_file", ",", "adam_snps", ",", "spark_on_toil", ")", ":", "log", ".", "info", "(", "\"Converting input BAM to ADAM.\"", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "\"transform\"", ",", "in_file", ",", "adam_file", "]", ",", "memory", "=", "inputs", ".", "memory", ",", "run_local", "=", "inputs", ".", "run_local", ",", "native_adam_path", "=", "inputs", ".", "native_adam_path", ")", "in_file_name", "=", "in_file", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "remove_file", "(", "master_ip", ",", "in_file_name", ",", "spark_on_toil", ")", "log", ".", "info", "(", "\"Converting known sites VCF to ADAM.\"", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "\"vcf2adam\"", ",", "\"-only_variants\"", ",", "in_snps", ",", "adam_snps", "]", ",", "memory", "=", "inputs", ".", "memory", ",", "run_local", "=", "inputs", ".", "run_local", ",", "native_adam_path", "=", "inputs", ".", "native_adam_path", ")", "in_snps_name", "=", "in_snps", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "remove_file", "(", "master_ip", ",", "in_snps_name", ",", "spark_on_toil", ")" ]
Convert input sam/bam file and known SNPs file into ADAM format
[ "Convert", "input", "sam", "/", "bam", "file", "and", "known", "SNPs", "file", "into", "ADAM", "format" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L110-L134
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
adam_transform
def adam_transform(job, master_ip, inputs, in_file, snp_file, hdfs_dir, out_file, spark_on_toil): """ Preprocess in_file with known SNPs snp_file: - mark duplicates - realign indels - recalibrate base quality scores """ log.info("Marking duplicate reads.") call_adam(job, master_ip, ["transform", in_file, hdfs_dir + "/mkdups.adam", "-aligned_read_predicate", "-limit_projection", "-mark_duplicate_reads"], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) #FIXME in_file_name = in_file.split("/")[-1] remove_file(master_ip, in_file_name + "*", spark_on_toil) log.info("Realigning INDELs.") call_adam(job, master_ip, ["transform", hdfs_dir + "/mkdups.adam", hdfs_dir + "/ri.adam", "-realign_indels"], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, hdfs_dir + "/mkdups.adam*", spark_on_toil) log.info("Recalibrating base quality scores.") call_adam(job, master_ip, ["transform", hdfs_dir + "/ri.adam", hdfs_dir + "/bqsr.adam", "-recalibrate_base_qualities", "-known_snps", snp_file], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, "ri.adam*", spark_on_toil) log.info("Sorting reads and saving a single BAM file.") call_adam(job, master_ip, ["transform", hdfs_dir + "/bqsr.adam", out_file, "-sort_reads", "-single"], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, "bqsr.adam*", spark_on_toil) return out_file
python
def adam_transform(job, master_ip, inputs, in_file, snp_file, hdfs_dir, out_file, spark_on_toil): """ Preprocess in_file with known SNPs snp_file: - mark duplicates - realign indels - recalibrate base quality scores """ log.info("Marking duplicate reads.") call_adam(job, master_ip, ["transform", in_file, hdfs_dir + "/mkdups.adam", "-aligned_read_predicate", "-limit_projection", "-mark_duplicate_reads"], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) #FIXME in_file_name = in_file.split("/")[-1] remove_file(master_ip, in_file_name + "*", spark_on_toil) log.info("Realigning INDELs.") call_adam(job, master_ip, ["transform", hdfs_dir + "/mkdups.adam", hdfs_dir + "/ri.adam", "-realign_indels"], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, hdfs_dir + "/mkdups.adam*", spark_on_toil) log.info("Recalibrating base quality scores.") call_adam(job, master_ip, ["transform", hdfs_dir + "/ri.adam", hdfs_dir + "/bqsr.adam", "-recalibrate_base_qualities", "-known_snps", snp_file], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, "ri.adam*", spark_on_toil) log.info("Sorting reads and saving a single BAM file.") call_adam(job, master_ip, ["transform", hdfs_dir + "/bqsr.adam", out_file, "-sort_reads", "-single"], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, "bqsr.adam*", spark_on_toil) return out_file
[ "def", "adam_transform", "(", "job", ",", "master_ip", ",", "inputs", ",", "in_file", ",", "snp_file", ",", "hdfs_dir", ",", "out_file", ",", "spark_on_toil", ")", ":", "log", ".", "info", "(", "\"Marking duplicate reads.\"", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "\"transform\"", ",", "in_file", ",", "hdfs_dir", "+", "\"/mkdups.adam\"", ",", "\"-aligned_read_predicate\"", ",", "\"-limit_projection\"", ",", "\"-mark_duplicate_reads\"", "]", ",", "memory", "=", "inputs", ".", "memory", ",", "run_local", "=", "inputs", ".", "run_local", ",", "native_adam_path", "=", "inputs", ".", "native_adam_path", ")", "#FIXME", "in_file_name", "=", "in_file", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "remove_file", "(", "master_ip", ",", "in_file_name", "+", "\"*\"", ",", "spark_on_toil", ")", "log", ".", "info", "(", "\"Realigning INDELs.\"", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "\"transform\"", ",", "hdfs_dir", "+", "\"/mkdups.adam\"", ",", "hdfs_dir", "+", "\"/ri.adam\"", ",", "\"-realign_indels\"", "]", ",", "memory", "=", "inputs", ".", "memory", ",", "run_local", "=", "inputs", ".", "run_local", ",", "native_adam_path", "=", "inputs", ".", "native_adam_path", ")", "remove_file", "(", "master_ip", ",", "hdfs_dir", "+", "\"/mkdups.adam*\"", ",", "spark_on_toil", ")", "log", ".", "info", "(", "\"Recalibrating base quality scores.\"", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "\"transform\"", ",", "hdfs_dir", "+", "\"/ri.adam\"", ",", "hdfs_dir", "+", "\"/bqsr.adam\"", ",", "\"-recalibrate_base_qualities\"", ",", "\"-known_snps\"", ",", "snp_file", "]", ",", "memory", "=", "inputs", ".", "memory", ",", "run_local", "=", "inputs", ".", "run_local", ",", "native_adam_path", "=", "inputs", ".", "native_adam_path", ")", "remove_file", "(", "master_ip", ",", "\"ri.adam*\"", ",", "spark_on_toil", ")", "log", ".", "info", "(", "\"Sorting reads and saving a single BAM file.\"", ")", "call_adam", "(", "job", ",", "master_ip", ",", "[", "\"transform\"", ",", "hdfs_dir", "+", "\"/bqsr.adam\"", ",", "out_file", ",", "\"-sort_reads\"", ",", "\"-single\"", "]", ",", "memory", "=", "inputs", ".", "memory", ",", "run_local", "=", "inputs", ".", "run_local", ",", "native_adam_path", "=", "inputs", ".", "native_adam_path", ")", "remove_file", "(", "master_ip", ",", "\"bqsr.adam*\"", ",", "spark_on_toil", ")", "return", "out_file" ]
Preprocess in_file with known SNPs snp_file: - mark duplicates - realign indels - recalibrate base quality scores
[ "Preprocess", "in_file", "with", "known", "SNPs", "snp_file", ":", "-", "mark", "duplicates", "-", "realign", "indels", "-", "recalibrate", "base", "quality", "scores" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L137-L197
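The four ADAM stages above all follow the same shape: run adam transform from one HDFS path to the next, then drop the previous intermediate. One way to express that chain is data-driven. This is a sketch only, with run_adam and remove passed in as callables rather than tied to the pipeline's call_adam and remove_file helpers:

def run_transform_chain(run_adam, remove, hdfs_dir, in_file, snp_file, out_file):
    # (source, destination, extra flags) for each stage, in order.
    stages = [
        (in_file, hdfs_dir + '/mkdups.adam',
         ['-aligned_read_predicate', '-limit_projection', '-mark_duplicate_reads']),
        (hdfs_dir + '/mkdups.adam', hdfs_dir + '/ri.adam', ['-realign_indels']),
        (hdfs_dir + '/ri.adam', hdfs_dir + '/bqsr.adam',
         ['-recalibrate_base_qualities', '-known_snps', snp_file]),
        (hdfs_dir + '/bqsr.adam', out_file, ['-sort_reads', '-single']),
    ]
    for src, dst, flags in stages:
        run_adam(['transform', src, dst] + flags)
        remove(src + '*')  # clear the consumed intermediate before the next stage
    return out_file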
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
upload_data
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil): """ Upload file hdfsName from hdfs to s3 """ if mock_mode(): truncate_file(master_ip, hdfs_name, spark_on_toil) log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name) call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory) remove_file(master_ip, hdfs_name, spark_on_toil)
python
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil): """ Upload file hdfsName from hdfs to s3 """ if mock_mode(): truncate_file(master_ip, hdfs_name, spark_on_toil) log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name) call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory) remove_file(master_ip, hdfs_name, spark_on_toil)
[ "def", "upload_data", "(", "job", ",", "master_ip", ",", "inputs", ",", "hdfs_name", ",", "upload_name", ",", "spark_on_toil", ")", ":", "if", "mock_mode", "(", ")", ":", "truncate_file", "(", "master_ip", ",", "hdfs_name", ",", "spark_on_toil", ")", "log", ".", "info", "(", "\"Uploading output BAM %s to %s.\"", ",", "hdfs_name", ",", "upload_name", ")", "call_conductor", "(", "job", ",", "master_ip", ",", "hdfs_name", ",", "upload_name", ",", "memory", "=", "inputs", ".", "memory", ")", "remove_file", "(", "master_ip", ",", "hdfs_name", ",", "spark_on_toil", ")" ]
Upload file hdfsName from hdfs to s3
[ "Upload", "file", "hdfsName", "from", "hdfs", "to", "s3" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L200-L210
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
download_run_and_upload
def download_run_and_upload(job, master_ip, inputs, spark_on_toil): """ Monolithic job that calls data download, conversion, transform, upload. Previously, this was not monolithic; change came in due to #126/#134. """ master_ip = MasterAddress(master_ip) bam_name = inputs.sample.split('://')[-1].split('/')[-1] sample_name = ".".join(os.path.splitext(bam_name)[:-1]) hdfs_subdir = sample_name + "-dir" if inputs.run_local: inputs.local_dir = job.fileStore.getLocalTempDir() if inputs.native_adam_path is None: hdfs_dir = "/data/" else: hdfs_dir = inputs.local_dir else: inputs.local_dir = None hdfs_dir = "hdfs://{0}:{1}/{2}".format(master_ip, HDFS_MASTER_PORT, hdfs_subdir) try: hdfs_prefix = hdfs_dir + "/" + sample_name hdfs_bam = hdfs_dir + "/" + bam_name hdfs_snps = hdfs_dir + "/" + inputs.dbsnp.split('://')[-1].split('/')[-1] if not inputs.run_local: download_data(job, master_ip, inputs, inputs.dbsnp, inputs.sample, hdfs_snps, hdfs_bam) else: copy_files([inputs.sample, inputs.dbsnp], inputs.local_dir) adam_input = hdfs_prefix + ".adam" adam_snps = hdfs_dir + "/snps.var.adam" adam_convert(job, master_ip, inputs, hdfs_bam, hdfs_snps, adam_input, adam_snps, spark_on_toil) adam_output = hdfs_prefix + ".processed.bam" adam_transform(job, master_ip, inputs, adam_input, adam_snps, hdfs_dir, adam_output, spark_on_toil) out_file = inputs.output_dir + "/" + sample_name + inputs.suffix + ".bam" if not inputs.run_local: upload_data(job, master_ip, inputs, adam_output, out_file, spark_on_toil) else: local_adam_output = "%s/%s.processed.bam" % (inputs.local_dir, sample_name) move_files([local_adam_output], inputs.output_dir) remove_file(master_ip, hdfs_subdir, spark_on_toil) except: remove_file(master_ip, hdfs_subdir, spark_on_toil) raise
python
def download_run_and_upload(job, master_ip, inputs, spark_on_toil): """ Monolithic job that calls data download, conversion, transform, upload. Previously, this was not monolithic; change came in due to #126/#134. """ master_ip = MasterAddress(master_ip) bam_name = inputs.sample.split('://')[-1].split('/')[-1] sample_name = ".".join(os.path.splitext(bam_name)[:-1]) hdfs_subdir = sample_name + "-dir" if inputs.run_local: inputs.local_dir = job.fileStore.getLocalTempDir() if inputs.native_adam_path is None: hdfs_dir = "/data/" else: hdfs_dir = inputs.local_dir else: inputs.local_dir = None hdfs_dir = "hdfs://{0}:{1}/{2}".format(master_ip, HDFS_MASTER_PORT, hdfs_subdir) try: hdfs_prefix = hdfs_dir + "/" + sample_name hdfs_bam = hdfs_dir + "/" + bam_name hdfs_snps = hdfs_dir + "/" + inputs.dbsnp.split('://')[-1].split('/')[-1] if not inputs.run_local: download_data(job, master_ip, inputs, inputs.dbsnp, inputs.sample, hdfs_snps, hdfs_bam) else: copy_files([inputs.sample, inputs.dbsnp], inputs.local_dir) adam_input = hdfs_prefix + ".adam" adam_snps = hdfs_dir + "/snps.var.adam" adam_convert(job, master_ip, inputs, hdfs_bam, hdfs_snps, adam_input, adam_snps, spark_on_toil) adam_output = hdfs_prefix + ".processed.bam" adam_transform(job, master_ip, inputs, adam_input, adam_snps, hdfs_dir, adam_output, spark_on_toil) out_file = inputs.output_dir + "/" + sample_name + inputs.suffix + ".bam" if not inputs.run_local: upload_data(job, master_ip, inputs, adam_output, out_file, spark_on_toil) else: local_adam_output = "%s/%s.processed.bam" % (inputs.local_dir, sample_name) move_files([local_adam_output], inputs.output_dir) remove_file(master_ip, hdfs_subdir, spark_on_toil) except: remove_file(master_ip, hdfs_subdir, spark_on_toil) raise
[ "def", "download_run_and_upload", "(", "job", ",", "master_ip", ",", "inputs", ",", "spark_on_toil", ")", ":", "master_ip", "=", "MasterAddress", "(", "master_ip", ")", "bam_name", "=", "inputs", ".", "sample", ".", "split", "(", "'://'", ")", "[", "-", "1", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "sample_name", "=", "\".\"", ".", "join", "(", "os", ".", "path", ".", "splitext", "(", "bam_name", ")", "[", ":", "-", "1", "]", ")", "hdfs_subdir", "=", "sample_name", "+", "\"-dir\"", "if", "inputs", ".", "run_local", ":", "inputs", ".", "local_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "if", "inputs", ".", "native_adam_path", "is", "None", ":", "hdfs_dir", "=", "\"/data/\"", "else", ":", "hdfs_dir", "=", "inputs", ".", "local_dir", "else", ":", "inputs", ".", "local_dir", "=", "None", "hdfs_dir", "=", "\"hdfs://{0}:{1}/{2}\"", ".", "format", "(", "master_ip", ",", "HDFS_MASTER_PORT", ",", "hdfs_subdir", ")", "try", ":", "hdfs_prefix", "=", "hdfs_dir", "+", "\"/\"", "+", "sample_name", "hdfs_bam", "=", "hdfs_dir", "+", "\"/\"", "+", "bam_name", "hdfs_snps", "=", "hdfs_dir", "+", "\"/\"", "+", "inputs", ".", "dbsnp", ".", "split", "(", "'://'", ")", "[", "-", "1", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "if", "not", "inputs", ".", "run_local", ":", "download_data", "(", "job", ",", "master_ip", ",", "inputs", ",", "inputs", ".", "dbsnp", ",", "inputs", ".", "sample", ",", "hdfs_snps", ",", "hdfs_bam", ")", "else", ":", "copy_files", "(", "[", "inputs", ".", "sample", ",", "inputs", ".", "dbsnp", "]", ",", "inputs", ".", "local_dir", ")", "adam_input", "=", "hdfs_prefix", "+", "\".adam\"", "adam_snps", "=", "hdfs_dir", "+", "\"/snps.var.adam\"", "adam_convert", "(", "job", ",", "master_ip", ",", "inputs", ",", "hdfs_bam", ",", "hdfs_snps", ",", "adam_input", ",", "adam_snps", ",", "spark_on_toil", ")", "adam_output", "=", "hdfs_prefix", "+", "\".processed.bam\"", "adam_transform", "(", "job", ",", "master_ip", ",", "inputs", ",", "adam_input", ",", "adam_snps", ",", "hdfs_dir", ",", "adam_output", ",", "spark_on_toil", ")", "out_file", "=", "inputs", ".", "output_dir", "+", "\"/\"", "+", "sample_name", "+", "inputs", ".", "suffix", "+", "\".bam\"", "if", "not", "inputs", ".", "run_local", ":", "upload_data", "(", "job", ",", "master_ip", ",", "inputs", ",", "adam_output", ",", "out_file", ",", "spark_on_toil", ")", "else", ":", "local_adam_output", "=", "\"%s/%s.processed.bam\"", "%", "(", "inputs", ".", "local_dir", ",", "sample_name", ")", "move_files", "(", "[", "local_adam_output", "]", ",", "inputs", ".", "output_dir", ")", "remove_file", "(", "master_ip", ",", "hdfs_subdir", ",", "spark_on_toil", ")", "except", ":", "remove_file", "(", "master_ip", ",", "hdfs_subdir", ",", "spark_on_toil", ")", "raise" ]
Monolithic job that calls data download, conversion, transform, upload. Previously, this was not monolithic; change came in due to #126/#134.
[ "Monolithic", "job", "that", "calls", "data", "download", "conversion", "transform", "upload", ".", "Previously", "this", "was", "not", "monolithic", ";", "change", "came", "in", "due", "to", "#126", "/", "#134", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L213-L264
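The except/re-raise block above performs the same HDFS cleanup on both the success and failure paths; try/finally expresses that guarantee directly. A sketch, with do_work standing in for the download/convert/transform/upload sequence and remove standing in for remove_file:

def run_with_hdfs_cleanup(do_work, remove, master_ip, hdfs_subdir, spark_on_toil):
    # The per-sample HDFS directory is removed whether the work succeeds or raises.
    try:
        return do_work()
    finally:
        remove(master_ip, hdfs_subdir, spark_on_toil)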
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
static_adam_preprocessing_dag
def static_adam_preprocessing_dag(job, inputs, sample, output_dir, suffix=''): """ A Toil job function performing ADAM preprocessing on a single sample """ inputs.sample = sample inputs.output_dir = output_dir inputs.suffix = suffix if inputs.master_ip is not None or inputs.run_local: if not inputs.run_local and inputs.master_ip == 'auto': # Static, standalone Spark cluster managed by uberscript spark_on_toil = False scale_up = job.wrapJobFn(scale_external_spark_cluster, 1) job.addChild(scale_up) spark_work = job.wrapJobFn(download_run_and_upload, inputs.master_ip, inputs, spark_on_toil) scale_up.addChild(spark_work) scale_down = job.wrapJobFn(scale_external_spark_cluster, -1) spark_work.addChild(scale_down) else: # Static, external Spark cluster spark_on_toil = False spark_work = job.wrapJobFn(download_run_and_upload, inputs.master_ip, inputs, spark_on_toil) job.addChild(spark_work) else: # Dynamic subclusters, i.e. Spark-on-Toil spark_on_toil = True cores = multiprocessing.cpu_count() master_ip = spawn_spark_cluster(job, False, # Sudo inputs.num_nodes-1, cores=cores, memory=inputs.memory) spark_work = job.wrapJobFn(download_run_and_upload, master_ip, inputs, spark_on_toil) job.addChild(spark_work)
python
def static_adam_preprocessing_dag(job, inputs, sample, output_dir, suffix=''): """ A Toil job function performing ADAM preprocessing on a single sample """ inputs.sample = sample inputs.output_dir = output_dir inputs.suffix = suffix if inputs.master_ip is not None or inputs.run_local: if not inputs.run_local and inputs.master_ip == 'auto': # Static, standalone Spark cluster managed by uberscript spark_on_toil = False scale_up = job.wrapJobFn(scale_external_spark_cluster, 1) job.addChild(scale_up) spark_work = job.wrapJobFn(download_run_and_upload, inputs.master_ip, inputs, spark_on_toil) scale_up.addChild(spark_work) scale_down = job.wrapJobFn(scale_external_spark_cluster, -1) spark_work.addChild(scale_down) else: # Static, external Spark cluster spark_on_toil = False spark_work = job.wrapJobFn(download_run_and_upload, inputs.master_ip, inputs, spark_on_toil) job.addChild(spark_work) else: # Dynamic subclusters, i.e. Spark-on-Toil spark_on_toil = True cores = multiprocessing.cpu_count() master_ip = spawn_spark_cluster(job, False, # Sudo inputs.num_nodes-1, cores=cores, memory=inputs.memory) spark_work = job.wrapJobFn(download_run_and_upload, master_ip, inputs, spark_on_toil) job.addChild(spark_work)
[ "def", "static_adam_preprocessing_dag", "(", "job", ",", "inputs", ",", "sample", ",", "output_dir", ",", "suffix", "=", "''", ")", ":", "inputs", ".", "sample", "=", "sample", "inputs", ".", "output_dir", "=", "output_dir", "inputs", ".", "suffix", "=", "suffix", "if", "inputs", ".", "master_ip", "is", "not", "None", "or", "inputs", ".", "run_local", ":", "if", "not", "inputs", ".", "run_local", "and", "inputs", ".", "master_ip", "==", "'auto'", ":", "# Static, standalone Spark cluster managed by uberscript", "spark_on_toil", "=", "False", "scale_up", "=", "job", ".", "wrapJobFn", "(", "scale_external_spark_cluster", ",", "1", ")", "job", ".", "addChild", "(", "scale_up", ")", "spark_work", "=", "job", ".", "wrapJobFn", "(", "download_run_and_upload", ",", "inputs", ".", "master_ip", ",", "inputs", ",", "spark_on_toil", ")", "scale_up", ".", "addChild", "(", "spark_work", ")", "scale_down", "=", "job", ".", "wrapJobFn", "(", "scale_external_spark_cluster", ",", "-", "1", ")", "spark_work", ".", "addChild", "(", "scale_down", ")", "else", ":", "# Static, external Spark cluster", "spark_on_toil", "=", "False", "spark_work", "=", "job", ".", "wrapJobFn", "(", "download_run_and_upload", ",", "inputs", ".", "master_ip", ",", "inputs", ",", "spark_on_toil", ")", "job", ".", "addChild", "(", "spark_work", ")", "else", ":", "# Dynamic subclusters, i.e. Spark-on-Toil", "spark_on_toil", "=", "True", "cores", "=", "multiprocessing", ".", "cpu_count", "(", ")", "master_ip", "=", "spawn_spark_cluster", "(", "job", ",", "False", ",", "# Sudo", "inputs", ".", "num_nodes", "-", "1", ",", "cores", "=", "cores", ",", "memory", "=", "inputs", ".", "memory", ")", "spark_work", "=", "job", ".", "wrapJobFn", "(", "download_run_and_upload", ",", "master_ip", ",", "inputs", ",", "spark_on_toil", ")", "job", ".", "addChild", "(", "spark_work", ")" ]
A Toil job function performing ADAM preprocessing on a single sample
[ "A", "Toil", "job", "function", "performing", "ADAM", "preprocessing", "on", "a", "single", "sample" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L267-L303
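The 'auto' branch above brackets the Spark work between a scale-up and a scale-down job; that shape is plain parent/child wiring in Toil. A sketch using the same wrapJobFn/addChild calls seen above (the function names here are placeholders):

def bracket_with_scaling(root, scale_fn, work_fn, *work_args):
    # scale up -> run the Spark work -> scale back down, strictly in that order.
    scale_up = root.wrapJobFn(scale_fn, 1)
    work = root.wrapJobFn(work_fn, *work_args)
    scale_down = root.wrapJobFn(scale_fn, -1)
    root.addChild(scale_up)
    scale_up.addChild(work)
    work.addChild(scale_down)
    return work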
BD2KGenomics/toil-scripts
src/toil_scripts/gatk_germline/hard_filter.py
hard_filter_pipeline
def hard_filter_pipeline(job, uuid, vcf_id, config): """ Runs GATK Hard Filtering on a Genomic VCF file and uploads the results. 0: Start 0 --> 1 --> 3 --> 5 --> 6 1: Select SNPs | | 2: Select INDELs +-> 2 --> 4 + 3: Apply SNP Filter 4: Apply INDEL Filter 5: Merge SNP and INDEL VCFs 6: Write filtered VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.snp_filter_name Name of SNP filter for VCF header config.snp_filter_expression SNP JEXL filter expression config.indel_filter_name Name of INDEL filter for VCF header config.indel_filter_expression INDEL JEXL filter expression config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption :return: SNP and INDEL FileStoreIDs :rtype: tuple """ job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid)) # Get the total size of the genome reference genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # The SelectVariants disk requirement depends on the input VCF, the genome reference files, # and the output VCF. The output VCF is smaller than the input VCF. The disk requirement # is identical for SNPs and INDELs. select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, vcf_id, genome_ref_size) select_snps = job.wrapJobFn(gatk_select_variants, 'SNP', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=select_variants_disk) # The VariantFiltration disk requirement depends on the input VCF, the genome reference files, # and the output VCF. The filtered VCF is smaller than the input VCF. snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, select_snps.rv(), genome_ref_size) snp_filter = job.wrapJobFn(gatk_variant_filtration, select_snps.rv(), config.snp_filter_name, config.snp_filter_expression, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=snp_filter_disk) select_indels = job.wrapJobFn(gatk_select_variants, 'INDEL', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=select_variants_disk) indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, select_indels.rv(), genome_ref_size) indel_filter = job.wrapJobFn(gatk_variant_filtration, select_indels.rv(), config.indel_filter_name, config.indel_filter_expression, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=indel_filter_disk) # The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the # genome reference files. The combined VCF is approximately the same size as the input files. 
combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size: 2 * (vcf1.size + vcf2.size) + ref_size, indel_filter.rv(), snp_filter.rv(), genome_ref_size) combine_vcfs = job.wrapJobFn(gatk_combine_variants, {'SNPs': snp_filter.rv(), 'INDELs': indel_filter.rv()}, config.genome_fasta, config.genome_fai, config.genome_dict, merge_option='UNSORTED', # Merges variants from a single sample memory=config.xmx, disk=combine_vcfs_disk) job.addChild(select_snps) job.addChild(select_indels) select_snps.addChild(snp_filter) snp_filter.addChild(combine_vcfs) select_indels.addChild(indel_filter) indel_filter.addChild(combine_vcfs) # Output the hard filtered VCF output_dir = os.path.join(config.output_dir, uuid) output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix) output_vcf = job.wrapJobFn(output_file_job, output_filename, combine_vcfs.rv(), output_dir, s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, combine_vcfs.rv())) combine_vcfs.addChild(output_vcf) return combine_vcfs.rv()
python
def hard_filter_pipeline(job, uuid, vcf_id, config): """ Runs GATK Hard Filtering on a Genomic VCF file and uploads the results. 0: Start 0 --> 1 --> 3 --> 5 --> 6 1: Select SNPs | | 2: Select INDELs +-> 2 --> 4 + 3: Apply SNP Filter 4: Apply INDEL Filter 5: Merge SNP and INDEL VCFs 6: Write filtered VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.snp_filter_name Name of SNP filter for VCF header config.snp_filter_expression SNP JEXL filter expression config.indel_filter_name Name of INDEL filter for VCF header config.indel_filter_expression INDEL JEXL filter expression config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption :return: SNP and INDEL FileStoreIDs :rtype: tuple """ job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid)) # Get the total size of the genome reference genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size # The SelectVariants disk requirement depends on the input VCF, the genome reference files, # and the output VCF. The output VCF is smaller than the input VCF. The disk requirement # is identical for SNPs and INDELs. select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, vcf_id, genome_ref_size) select_snps = job.wrapJobFn(gatk_select_variants, 'SNP', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=select_variants_disk) # The VariantFiltration disk requirement depends on the input VCF, the genome reference files, # and the output VCF. The filtered VCF is smaller than the input VCF. snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, select_snps.rv(), genome_ref_size) snp_filter = job.wrapJobFn(gatk_variant_filtration, select_snps.rv(), config.snp_filter_name, config.snp_filter_expression, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=snp_filter_disk) select_indels = job.wrapJobFn(gatk_select_variants, 'INDEL', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=select_variants_disk) indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, select_indels.rv(), genome_ref_size) indel_filter = job.wrapJobFn(gatk_variant_filtration, select_indels.rv(), config.indel_filter_name, config.indel_filter_expression, config.genome_fasta, config.genome_fai, config.genome_dict, memory=config.xmx, disk=indel_filter_disk) # The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the # genome reference files. The combined VCF is approximately the same size as the input files. 
combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size: 2 * (vcf1.size + vcf2.size) + ref_size, indel_filter.rv(), snp_filter.rv(), genome_ref_size) combine_vcfs = job.wrapJobFn(gatk_combine_variants, {'SNPs': snp_filter.rv(), 'INDELs': indel_filter.rv()}, config.genome_fasta, config.genome_fai, config.genome_dict, merge_option='UNSORTED', # Merges variants from a single sample memory=config.xmx, disk=combine_vcfs_disk) job.addChild(select_snps) job.addChild(select_indels) select_snps.addChild(snp_filter) snp_filter.addChild(combine_vcfs) select_indels.addChild(indel_filter) indel_filter.addChild(combine_vcfs) # Output the hard filtered VCF output_dir = os.path.join(config.output_dir, uuid) output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix) output_vcf = job.wrapJobFn(output_file_job, output_filename, combine_vcfs.rv(), output_dir, s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, combine_vcfs.rv())) combine_vcfs.addChild(output_vcf) return combine_vcfs.rv()
[ "def", "hard_filter_pipeline", "(", "job", ",", "uuid", ",", "vcf_id", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Running Hard Filter on {}'", ".", "format", "(", "uuid", ")", ")", "# Get the total size of the genome reference", "genome_ref_size", "=", "config", ".", "genome_fasta", ".", "size", "+", "config", ".", "genome_fai", ".", "size", "+", "config", ".", "genome_dict", ".", "size", "# The SelectVariants disk requirement depends on the input VCF, the genome reference files,", "# and the output VCF. The output VCF is smaller than the input VCF. The disk requirement", "# is identical for SNPs and INDELs.", "select_variants_disk", "=", "PromisedRequirement", "(", "lambda", "vcf", ",", "ref_size", ":", "2", "*", "vcf", ".", "size", "+", "ref_size", ",", "vcf_id", ",", "genome_ref_size", ")", "select_snps", "=", "job", ".", "wrapJobFn", "(", "gatk_select_variants", ",", "'SNP'", ",", "vcf_id", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "memory", "=", "config", ".", "xmx", ",", "disk", "=", "select_variants_disk", ")", "# The VariantFiltration disk requirement depends on the input VCF, the genome reference files,", "# and the output VCF. The filtered VCF is smaller than the input VCF.", "snp_filter_disk", "=", "PromisedRequirement", "(", "lambda", "vcf", ",", "ref_size", ":", "2", "*", "vcf", ".", "size", "+", "ref_size", ",", "select_snps", ".", "rv", "(", ")", ",", "genome_ref_size", ")", "snp_filter", "=", "job", ".", "wrapJobFn", "(", "gatk_variant_filtration", ",", "select_snps", ".", "rv", "(", ")", ",", "config", ".", "snp_filter_name", ",", "config", ".", "snp_filter_expression", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "memory", "=", "config", ".", "xmx", ",", "disk", "=", "snp_filter_disk", ")", "select_indels", "=", "job", ".", "wrapJobFn", "(", "gatk_select_variants", ",", "'INDEL'", ",", "vcf_id", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "memory", "=", "config", ".", "xmx", ",", "disk", "=", "select_variants_disk", ")", "indel_filter_disk", "=", "PromisedRequirement", "(", "lambda", "vcf", ",", "ref_size", ":", "2", "*", "vcf", ".", "size", "+", "ref_size", ",", "select_indels", ".", "rv", "(", ")", ",", "genome_ref_size", ")", "indel_filter", "=", "job", ".", "wrapJobFn", "(", "gatk_variant_filtration", ",", "select_indels", ".", "rv", "(", ")", ",", "config", ".", "indel_filter_name", ",", "config", ".", "indel_filter_expression", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "memory", "=", "config", ".", "xmx", ",", "disk", "=", "indel_filter_disk", ")", "# The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the", "# genome reference files. 
The combined VCF is approximately the same size as the input files.", "combine_vcfs_disk", "=", "PromisedRequirement", "(", "lambda", "vcf1", ",", "vcf2", ",", "ref_size", ":", "2", "*", "(", "vcf1", ".", "size", "+", "vcf2", ".", "size", ")", "+", "ref_size", ",", "indel_filter", ".", "rv", "(", ")", ",", "snp_filter", ".", "rv", "(", ")", ",", "genome_ref_size", ")", "combine_vcfs", "=", "job", ".", "wrapJobFn", "(", "gatk_combine_variants", ",", "{", "'SNPs'", ":", "snp_filter", ".", "rv", "(", ")", ",", "'INDELs'", ":", "indel_filter", ".", "rv", "(", ")", "}", ",", "config", ".", "genome_fasta", ",", "config", ".", "genome_fai", ",", "config", ".", "genome_dict", ",", "merge_option", "=", "'UNSORTED'", ",", "# Merges variants from a single sample", "memory", "=", "config", ".", "xmx", ",", "disk", "=", "combine_vcfs_disk", ")", "job", ".", "addChild", "(", "select_snps", ")", "job", ".", "addChild", "(", "select_indels", ")", "select_snps", ".", "addChild", "(", "snp_filter", ")", "snp_filter", ".", "addChild", "(", "combine_vcfs", ")", "select_indels", ".", "addChild", "(", "indel_filter", ")", "indel_filter", ".", "addChild", "(", "combine_vcfs", ")", "# Output the hard filtered VCF", "output_dir", "=", "os", ".", "path", ".", "join", "(", "config", ".", "output_dir", ",", "uuid", ")", "output_filename", "=", "'%s.hard_filter%s.vcf'", "%", "(", "uuid", ",", "config", ".", "suffix", ")", "output_vcf", "=", "job", ".", "wrapJobFn", "(", "output_file_job", ",", "output_filename", ",", "combine_vcfs", ".", "rv", "(", ")", ",", "output_dir", ",", "s3_key_path", "=", "config", ".", "ssec", ",", "disk", "=", "PromisedRequirement", "(", "lambda", "x", ":", "x", ".", "size", ",", "combine_vcfs", ".", "rv", "(", ")", ")", ")", "combine_vcfs", ".", "addChild", "(", "output_vcf", ")", "return", "combine_vcfs", ".", "rv", "(", ")" ]
Runs GATK Hard Filtering on a Genomic VCF file and uploads the results. 0: Start 0 --> 1 --> 3 --> 5 --> 6 1: Select SNPs | | 2: Select INDELs +-> 2 --> 4 + 3: Apply SNP Filter 4: Apply INDEL Filter 5: Merge SNP and INDEL VCFs 6: Write filtered VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.snp_filter_name Name of SNP filter for VCF header config.snp_filter_expression SNP JEXL filter expression config.indel_filter_name Name of INDEL filter for VCF header config.indel_filter_expression INDEL JEXL filter expression config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption :return: SNP and INDEL FileStoreIDs :rtype: tuple
[ "Runs", "GATK", "Hard", "Filtering", "on", "a", "Genomic", "VCF", "file", "and", "uploads", "the", "results", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/hard_filter.py#L11-L137
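The job wiring above fans out into SNP and INDEL branches and fans back in at the combine step; because combine_vcfs is added as a child of both filter jobs, Toil only runs it once both parents have finished. The same shape in isolation, as a sketch whose job arguments are placeholders for jobs built with wrapJobFn as above:

def wire_fan_in(root, select_snps, snp_filter, select_indels, indel_filter, combine):
    # Two independent branches that re-join at a single downstream job.
    root.addChild(select_snps)
    root.addChild(select_indels)
    select_snps.addChild(snp_filter)
    select_indels.addChild(indel_filter)
    snp_filter.addChild(combine)
    indel_filter.addChild(combine)
    return combine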
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_tcga_to_s3/transfer_tcga_to_s3.py
download_and_transfer_sample
def download_and_transfer_sample(job, sample, inputs): """ Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub """ analysis_id = sample[0] work_dir = job.fileStore.getLocalTempDir() folder_path = os.path.join(work_dir, os.path.basename(analysis_id)) # Acquire genetorrent key and download sample shutil.copy(inputs['genetorrent_key'], os.path.join(work_dir, 'cghub.key')) parameters = ['-vv', '-c', 'cghub.key', '-d', analysis_id] docker_call(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b', work_dir=work_dir, parameters=parameters) try: sample = glob.glob(os.path.join(folder_path, '*tar*'))[0] except KeyError as e: print 'No tarfile found inside of folder: '.format(e) raise # Upload sample to S3AM key_path = inputs['ssec'] if sample.endswith('gz'): sample_name = analysis_id + '.tar.gz' shutil.move(sample, os.path.join(work_dir, sample_name)) else: sample_name = analysis_id + '.tar' shutil.move(sample, os.path.join(work_dir, sample_name)) # Parse s3_dir to get bucket and s3 path s3_dir = inputs['s3_dir'] bucket_name = s3_dir.lstrip('/').split('/')[0] base_url = 'https://s3-us-west-2.amazonaws.com/' url = os.path.join(base_url, bucket_name, sample_name) # Generate keyfile for upload with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out: f_out.write(generate_unique_key(key_path, url)) # Upload to S3 via S3AM s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(work_dir, 'temp.key'), 'file://{}'.format(os.path.join(work_dir, sample_name)), 's3://' + bucket_name + '/'] subprocess.check_call(s3am_command)
python
def download_and_transfer_sample(job, sample, inputs): """ Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub """ analysis_id = sample[0] work_dir = job.fileStore.getLocalTempDir() folder_path = os.path.join(work_dir, os.path.basename(analysis_id)) # Acquire genetorrent key and download sample shutil.copy(inputs['genetorrent_key'], os.path.join(work_dir, 'cghub.key')) parameters = ['-vv', '-c', 'cghub.key', '-d', analysis_id] docker_call(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b', work_dir=work_dir, parameters=parameters) try: sample = glob.glob(os.path.join(folder_path, '*tar*'))[0] except KeyError as e: print 'No tarfile found inside of folder: '.format(e) raise # Upload sample to S3AM key_path = inputs['ssec'] if sample.endswith('gz'): sample_name = analysis_id + '.tar.gz' shutil.move(sample, os.path.join(work_dir, sample_name)) else: sample_name = analysis_id + '.tar' shutil.move(sample, os.path.join(work_dir, sample_name)) # Parse s3_dir to get bucket and s3 path s3_dir = inputs['s3_dir'] bucket_name = s3_dir.lstrip('/').split('/')[0] base_url = 'https://s3-us-west-2.amazonaws.com/' url = os.path.join(base_url, bucket_name, sample_name) # Generate keyfile for upload with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out: f_out.write(generate_unique_key(key_path, url)) # Upload to S3 via S3AM s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(work_dir, 'temp.key'), 'file://{}'.format(os.path.join(work_dir, sample_name)), 's3://' + bucket_name + '/'] subprocess.check_call(s3am_command)
[ "def", "download_and_transfer_sample", "(", "job", ",", "sample", ",", "inputs", ")", ":", "analysis_id", "=", "sample", "[", "0", "]", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "folder_path", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "os", ".", "path", ".", "basename", "(", "analysis_id", ")", ")", "# Acquire genetorrent key and download sample", "shutil", ".", "copy", "(", "inputs", "[", "'genetorrent_key'", "]", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'cghub.key'", ")", ")", "parameters", "=", "[", "'-vv'", ",", "'-c'", ",", "'cghub.key'", ",", "'-d'", ",", "analysis_id", "]", "docker_call", "(", "job", "=", "job", ",", "tool", "=", "'quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b'", ",", "work_dir", "=", "work_dir", ",", "parameters", "=", "parameters", ")", "try", ":", "sample", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "folder_path", ",", "'*tar*'", ")", ")", "[", "0", "]", "except", "KeyError", "as", "e", ":", "print", "'No tarfile found inside of folder: '", ".", "format", "(", "e", ")", "raise", "# Upload sample to S3AM", "key_path", "=", "inputs", "[", "'ssec'", "]", "if", "sample", ".", "endswith", "(", "'gz'", ")", ":", "sample_name", "=", "analysis_id", "+", "'.tar.gz'", "shutil", ".", "move", "(", "sample", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample_name", ")", ")", "else", ":", "sample_name", "=", "analysis_id", "+", "'.tar'", "shutil", ".", "move", "(", "sample", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample_name", ")", ")", "# Parse s3_dir to get bucket and s3 path", "s3_dir", "=", "inputs", "[", "'s3_dir'", "]", "bucket_name", "=", "s3_dir", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "0", "]", "base_url", "=", "'https://s3-us-west-2.amazonaws.com/'", "url", "=", "os", ".", "path", ".", "join", "(", "base_url", ",", "bucket_name", ",", "sample_name", ")", "# Generate keyfile for upload", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'temp.key'", ")", ",", "'wb'", ")", "as", "f_out", ":", "f_out", ".", "write", "(", "generate_unique_key", "(", "key_path", ",", "url", ")", ")", "# Upload to S3 via S3AM", "s3am_command", "=", "[", "'s3am'", ",", "'upload'", ",", "'--sse-key-file'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'temp.key'", ")", ",", "'file://{}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample_name", ")", ")", ",", "'s3://'", "+", "bucket_name", "+", "'/'", "]", "subprocess", ".", "check_call", "(", "s3am_command", ")" ]
Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub
[ "Downloads", "a", "sample", "from", "CGHub", "via", "GeneTorrent", "then", "uses", "S3AM", "to", "transfer", "it", "to", "S3" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_tcga_to_s3/transfer_tcga_to_s3.py#L65-L108
BD2KGenomics/toil-scripts
src/toil_scripts/transfer_tcga_to_s3/transfer_tcga_to_s3.py
main
def main(): """ This is a Toil pipeline to transfer TCGA data into an S3 Bucket Data is pulled down with Genetorrent and transferred to S3 via S3AM. """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'genetorrent': args.genetorrent, 'genetorrent_key': args.genetorrent_key, 'ssec': args.ssec, 's3_dir': args.s3_dir} # Sanity checks if args.ssec: assert os.path.isfile(args.ssec) if args.genetorrent: assert os.path.isfile(args.genetorrent) if args.genetorrent_key: assert os.path.isfile(args.genetorrent_key) samples = parse_genetorrent(args.genetorrent) # Start pipeline # map_job accepts a function, an iterable, and *args. The function is launched as a child # process with one element from the iterable and *args, which in turn spawns a tree of child jobs. Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args)
python
def main(): """ This is a Toil pipeline to transfer TCGA data into an S3 Bucket Data is pulled down with Genetorrent and transferred to S3 via S3AM. """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'genetorrent': args.genetorrent, 'genetorrent_key': args.genetorrent_key, 'ssec': args.ssec, 's3_dir': args.s3_dir} # Sanity checks if args.ssec: assert os.path.isfile(args.ssec) if args.genetorrent: assert os.path.isfile(args.genetorrent) if args.genetorrent_key: assert os.path.isfile(args.genetorrent_key) samples = parse_genetorrent(args.genetorrent) # Start pipeline # map_job accepts a function, an iterable, and *args. The function is launched as a child # process with one element from the iterable and *args, which in turn spawns a tree of child jobs. Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to toil", "parser", "=", "build_parser", "(", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Store inputs from argparse", "inputs", "=", "{", "'genetorrent'", ":", "args", ".", "genetorrent", ",", "'genetorrent_key'", ":", "args", ".", "genetorrent_key", ",", "'ssec'", ":", "args", ".", "ssec", ",", "'s3_dir'", ":", "args", ".", "s3_dir", "}", "# Sanity checks", "if", "args", ".", "ssec", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "ssec", ")", "if", "args", ".", "genetorrent", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "genetorrent", ")", "if", "args", ".", "genetorrent_key", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "genetorrent_key", ")", "samples", "=", "parse_genetorrent", "(", "args", ".", "genetorrent", ")", "# Start pipeline", "# map_job accepts a function, an iterable, and *args. The function is launched as a child", "# process with one element from the iterable and *args, which in turn spawns a tree of child jobs.", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "map_job", ",", "download_and_transfer_sample", ",", "samples", ",", "inputs", ")", ",", "args", ")" ]
This is a Toil pipeline to transfer TCGA data into an S3 Bucket Data is pulled down with Genetorrent and transferred to S3 via S3AM.
[ "This", "is", "a", "Toil", "pipeline", "to", "transfer", "TCGA", "data", "into", "an", "S3", "Bucket" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/transfer_tcga_to_s3/transfer_tcga_to_s3.py#L111-L137
bd808/python-iptools
iptools/ipv6.py
validate_ip
def validate_ip(s): """Validate a hexidecimal IPv6 ip address. >>> validate_ip('::') True >>> validate_ip('::1') True >>> validate_ip('2001:db8:85a3::8a2e:370:7334') True >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334') True >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334') True >>> validate_ip('2001:db8::1:0:0:1') True >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') True >>> validate_ip('::ffff:192.0.2.128') True >>> validate_ip('::ff::ff') False >>> validate_ip('::fffff') False >>> validate_ip('::ffff:192.0.2.300') False >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected string or buffer >>> validate_ip('1080:0:0:0:8:800:200c:417a') True :param s: String to validate as a hexidecimal IPv6 ip address. :type s: str :returns: ``True`` if a valid hexidecimal IPv6 ip address, ``False`` otherwise. :raises: TypeError """ if _HEX_RE.match(s): return len(s.split('::')) <= 2 if _DOTTED_QUAD_RE.match(s): halves = s.split('::') if len(halves) > 2: return False hextets = s.split(':') quads = hextets[-1].split('.') for q in quads: if int(q) > 255: return False return True return False
python
def validate_ip(s): """Validate a hexidecimal IPv6 ip address. >>> validate_ip('::') True >>> validate_ip('::1') True >>> validate_ip('2001:db8:85a3::8a2e:370:7334') True >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334') True >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334') True >>> validate_ip('2001:db8::1:0:0:1') True >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') True >>> validate_ip('::ffff:192.0.2.128') True >>> validate_ip('::ff::ff') False >>> validate_ip('::fffff') False >>> validate_ip('::ffff:192.0.2.300') False >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected string or buffer >>> validate_ip('1080:0:0:0:8:800:200c:417a') True :param s: String to validate as a hexidecimal IPv6 ip address. :type s: str :returns: ``True`` if a valid hexidecimal IPv6 ip address, ``False`` otherwise. :raises: TypeError """ if _HEX_RE.match(s): return len(s.split('::')) <= 2 if _DOTTED_QUAD_RE.match(s): halves = s.split('::') if len(halves) > 2: return False hextets = s.split(':') quads = hextets[-1].split('.') for q in quads: if int(q) > 255: return False return True return False
[ "def", "validate_ip", "(", "s", ")", ":", "if", "_HEX_RE", ".", "match", "(", "s", ")", ":", "return", "len", "(", "s", ".", "split", "(", "'::'", ")", ")", "<=", "2", "if", "_DOTTED_QUAD_RE", ".", "match", "(", "s", ")", ":", "halves", "=", "s", ".", "split", "(", "'::'", ")", "if", "len", "(", "halves", ")", ">", "2", ":", "return", "False", "hextets", "=", "s", ".", "split", "(", "':'", ")", "quads", "=", "hextets", "[", "-", "1", "]", ".", "split", "(", "'.'", ")", "for", "q", "in", "quads", ":", "if", "int", "(", "q", ")", ">", "255", ":", "return", "False", "return", "True", "return", "False" ]
Validate a hexidecimal IPv6 ip address. >>> validate_ip('::') True >>> validate_ip('::1') True >>> validate_ip('2001:db8:85a3::8a2e:370:7334') True >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334') True >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334') True >>> validate_ip('2001:db8::1:0:0:1') True >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') True >>> validate_ip('::ffff:192.0.2.128') True >>> validate_ip('::ff::ff') False >>> validate_ip('::fffff') False >>> validate_ip('::ffff:192.0.2.300') False >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected string or buffer >>> validate_ip('1080:0:0:0:8:800:200c:417a') True :param s: String to validate as a hexidecimal IPv6 ip address. :type s: str :returns: ``True`` if a valid hexidecimal IPv6 ip address, ``False`` otherwise. :raises: TypeError
[ "Validate", "a", "hexidecimal", "IPv6", "ip", "address", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L157-L209
bd808/python-iptools
iptools/ipv6.py
ip2long
def ip2long(ip): """Convert a hexidecimal IPv6 address to a network byte order 128-bit integer. >>> ip2long('::') == 0 True >>> ip2long('::1') == 1 True >>> expect = 0x20010db885a3000000008a2e03707334 >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect True >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect True >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect True >>> expect = 0x20010db8000000000001000000000001 >>> ip2long('2001:db8::1:0:0:1') == expect True >>> expect = 281473902969472 >>> ip2long('::ffff:192.0.2.128') == expect True >>> expect = 0xffffffffffffffffffffffffffffffff >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect True >>> ip2long('ff::ff::ff') == None True >>> expect = 21932261930451111902915077091070067066 >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect True :param ip: Hexidecimal IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid. """ if not validate_ip(ip): return None if '.' in ip: # convert IPv4 suffix to hex chunks = ip.split(':') v4_int = ipv4.ip2long(chunks.pop()) if v4_int is None: return None chunks.append('%x' % ((v4_int >> 16) & 0xffff)) chunks.append('%x' % (v4_int & 0xffff)) ip = ':'.join(chunks) halves = ip.split('::') hextets = halves[0].split(':') if len(halves) == 2: h2 = halves[1].split(':') for z in range(8 - (len(hextets) + len(h2))): hextets.append('0') for h in h2: hextets.append(h) # end if lngip = 0 for h in hextets: if '' == h: h = '0' lngip = (lngip << 16) | int(h, 16) return lngip
python
def ip2long(ip): """Convert a hexidecimal IPv6 address to a network byte order 128-bit integer. >>> ip2long('::') == 0 True >>> ip2long('::1') == 1 True >>> expect = 0x20010db885a3000000008a2e03707334 >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect True >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect True >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect True >>> expect = 0x20010db8000000000001000000000001 >>> ip2long('2001:db8::1:0:0:1') == expect True >>> expect = 281473902969472 >>> ip2long('::ffff:192.0.2.128') == expect True >>> expect = 0xffffffffffffffffffffffffffffffff >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect True >>> ip2long('ff::ff::ff') == None True >>> expect = 21932261930451111902915077091070067066 >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect True :param ip: Hexidecimal IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid. """ if not validate_ip(ip): return None if '.' in ip: # convert IPv4 suffix to hex chunks = ip.split(':') v4_int = ipv4.ip2long(chunks.pop()) if v4_int is None: return None chunks.append('%x' % ((v4_int >> 16) & 0xffff)) chunks.append('%x' % (v4_int & 0xffff)) ip = ':'.join(chunks) halves = ip.split('::') hextets = halves[0].split(':') if len(halves) == 2: h2 = halves[1].split(':') for z in range(8 - (len(hextets) + len(h2))): hextets.append('0') for h in h2: hextets.append(h) # end if lngip = 0 for h in hextets: if '' == h: h = '0' lngip = (lngip << 16) | int(h, 16) return lngip
[ "def", "ip2long", "(", "ip", ")", ":", "if", "not", "validate_ip", "(", "ip", ")", ":", "return", "None", "if", "'.'", "in", "ip", ":", "# convert IPv4 suffix to hex", "chunks", "=", "ip", ".", "split", "(", "':'", ")", "v4_int", "=", "ipv4", ".", "ip2long", "(", "chunks", ".", "pop", "(", ")", ")", "if", "v4_int", "is", "None", ":", "return", "None", "chunks", ".", "append", "(", "'%x'", "%", "(", "(", "v4_int", ">>", "16", ")", "&", "0xffff", ")", ")", "chunks", ".", "append", "(", "'%x'", "%", "(", "v4_int", "&", "0xffff", ")", ")", "ip", "=", "':'", ".", "join", "(", "chunks", ")", "halves", "=", "ip", ".", "split", "(", "'::'", ")", "hextets", "=", "halves", "[", "0", "]", ".", "split", "(", "':'", ")", "if", "len", "(", "halves", ")", "==", "2", ":", "h2", "=", "halves", "[", "1", "]", ".", "split", "(", "':'", ")", "for", "z", "in", "range", "(", "8", "-", "(", "len", "(", "hextets", ")", "+", "len", "(", "h2", ")", ")", ")", ":", "hextets", ".", "append", "(", "'0'", ")", "for", "h", "in", "h2", ":", "hextets", ".", "append", "(", "h", ")", "# end if", "lngip", "=", "0", "for", "h", "in", "hextets", ":", "if", "''", "==", "h", ":", "h", "=", "'0'", "lngip", "=", "(", "lngip", "<<", "16", ")", "|", "int", "(", "h", ",", "16", ")", "return", "lngip" ]
Convert a hexidecimal IPv6 address to a network byte order 128-bit integer. >>> ip2long('::') == 0 True >>> ip2long('::1') == 1 True >>> expect = 0x20010db885a3000000008a2e03707334 >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect True >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect True >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect True >>> expect = 0x20010db8000000000001000000000001 >>> ip2long('2001:db8::1:0:0:1') == expect True >>> expect = 281473902969472 >>> ip2long('::ffff:192.0.2.128') == expect True >>> expect = 0xffffffffffffffffffffffffffffffff >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect True >>> ip2long('ff::ff::ff') == None True >>> expect = 21932261930451111902915077091070067066 >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect True :param ip: Hexidecimal IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
[ "Convert", "a", "hexidecimal", "IPv6", "address", "to", "a", "network", "byte", "order", "128", "-", "bit", "integer", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L213-L277
bd808/python-iptools
iptools/ipv6.py
long2ip
def long2ip(l, rfc1924=False): """Convert a network byte order 128-bit integer to a canonical IPv6 address. >>> long2ip(2130706433) '::7f00:1' >>> long2ip(42540766411282592856904266426630537217) '2001:db8::1:0:0:1' >>> long2ip(MIN_IP) '::' >>> long2ip(MAX_IP) 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int' >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and <really big int> inclusive >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and <really big int> inclusive >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True) '4)+k&C#VzJ4br>0wv%Yp' >>> long2ip(ip2long('::'), rfc1924=True) '00000000000000000000' :param l: Network byte order 128-bit integer. :type l: int :param rfc1924: Encode in RFC 1924 notation (base 85) :type rfc1924: bool :returns: Canonical IPv6 address (eg. '::1'). :raises: TypeError """ if MAX_IP < l or l < MIN_IP: raise TypeError( "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP)) if rfc1924: return long2rfc1924(l) # format as one big hex value hex_str = '%032x' % l # split into double octet chunks without padding zeros hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] # find and remove left most longest run of zeros dc_start, dc_len = (-1, 0) run_start, run_len = (-1, 0) for idx, hextet in enumerate(hextets): if '0' == hextet: run_len += 1 if -1 == run_start: run_start = idx if run_len > dc_len: dc_len, dc_start = (run_len, run_start) else: run_len, run_start = (0, -1) # end for if dc_len > 1: dc_end = dc_start + dc_len if dc_end == len(hextets): hextets += [''] hextets[dc_start:dc_end] = [''] if dc_start == 0: hextets = [''] + hextets # end if return ':'.join(hextets)
python
def long2ip(l, rfc1924=False): """Convert a network byte order 128-bit integer to a canonical IPv6 address. >>> long2ip(2130706433) '::7f00:1' >>> long2ip(42540766411282592856904266426630537217) '2001:db8::1:0:0:1' >>> long2ip(MIN_IP) '::' >>> long2ip(MAX_IP) 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int' >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and <really big int> inclusive >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and <really big int> inclusive >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True) '4)+k&C#VzJ4br>0wv%Yp' >>> long2ip(ip2long('::'), rfc1924=True) '00000000000000000000' :param l: Network byte order 128-bit integer. :type l: int :param rfc1924: Encode in RFC 1924 notation (base 85) :type rfc1924: bool :returns: Canonical IPv6 address (eg. '::1'). :raises: TypeError """ if MAX_IP < l or l < MIN_IP: raise TypeError( "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP)) if rfc1924: return long2rfc1924(l) # format as one big hex value hex_str = '%032x' % l # split into double octet chunks without padding zeros hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] # find and remove left most longest run of zeros dc_start, dc_len = (-1, 0) run_start, run_len = (-1, 0) for idx, hextet in enumerate(hextets): if '0' == hextet: run_len += 1 if -1 == run_start: run_start = idx if run_len > dc_len: dc_len, dc_start = (run_len, run_start) else: run_len, run_start = (0, -1) # end for if dc_len > 1: dc_end = dc_start + dc_len if dc_end == len(hextets): hextets += [''] hextets[dc_start:dc_end] = [''] if dc_start == 0: hextets = [''] + hextets # end if return ':'.join(hextets)
[ "def", "long2ip", "(", "l", ",", "rfc1924", "=", "False", ")", ":", "if", "MAX_IP", "<", "l", "or", "l", "<", "MIN_IP", ":", "raise", "TypeError", "(", "\"expected int between %d and %d inclusive\"", "%", "(", "MIN_IP", ",", "MAX_IP", ")", ")", "if", "rfc1924", ":", "return", "long2rfc1924", "(", "l", ")", "# format as one big hex value", "hex_str", "=", "'%032x'", "%", "l", "# split into double octet chunks without padding zeros", "hextets", "=", "[", "'%x'", "%", "int", "(", "hex_str", "[", "x", ":", "x", "+", "4", "]", ",", "16", ")", "for", "x", "in", "range", "(", "0", ",", "32", ",", "4", ")", "]", "# find and remove left most longest run of zeros", "dc_start", ",", "dc_len", "=", "(", "-", "1", ",", "0", ")", "run_start", ",", "run_len", "=", "(", "-", "1", ",", "0", ")", "for", "idx", ",", "hextet", "in", "enumerate", "(", "hextets", ")", ":", "if", "'0'", "==", "hextet", ":", "run_len", "+=", "1", "if", "-", "1", "==", "run_start", ":", "run_start", "=", "idx", "if", "run_len", ">", "dc_len", ":", "dc_len", ",", "dc_start", "=", "(", "run_len", ",", "run_start", ")", "else", ":", "run_len", ",", "run_start", "=", "(", "0", ",", "-", "1", ")", "# end for", "if", "dc_len", ">", "1", ":", "dc_end", "=", "dc_start", "+", "dc_len", "if", "dc_end", "==", "len", "(", "hextets", ")", ":", "hextets", "+=", "[", "''", "]", "hextets", "[", "dc_start", ":", "dc_end", "]", "=", "[", "''", "]", "if", "dc_start", "==", "0", ":", "hextets", "=", "[", "''", "]", "+", "hextets", "# end if", "return", "':'", ".", "join", "(", "hextets", ")" ]
Convert a network byte order 128-bit integer to a canonical IPv6 address. >>> long2ip(2130706433) '::7f00:1' >>> long2ip(42540766411282592856904266426630537217) '2001:db8::1:0:0:1' >>> long2ip(MIN_IP) '::' >>> long2ip(MAX_IP) 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int' >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and <really big int> inclusive >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and <really big int> inclusive >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True) '4)+k&C#VzJ4br>0wv%Yp' >>> long2ip(ip2long('::'), rfc1924=True) '00000000000000000000' :param l: Network byte order 128-bit integer. :type l: int :param rfc1924: Encode in RFC 1924 notation (base 85) :type rfc1924: bool :returns: Canonical IPv6 address (eg. '::1'). :raises: TypeError
[ "Convert", "a", "network", "byte", "order", "128", "-", "bit", "integer", "to", "a", "canonical", "IPv6", "address", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L281-L353
bd808/python-iptools
iptools/ipv6.py
long2rfc1924
def long2rfc1924(l): """Convert a network byte order 128-bit integer to an rfc1924 IPv6 address. >>> long2rfc1924(ip2long('1080::8:800:200C:417A')) '4)+k&C#VzJ4br>0wv%Yp' >>> long2rfc1924(ip2long('::')) '00000000000000000000' >>> long2rfc1924(MAX_IP) '=r54lj&NUUO~Hi%c2ym0' :param l: Network byte order 128-bit integer. :type l: int :returns: RFC 1924 IPv6 address :raises: TypeError """ if MAX_IP < l or l < MIN_IP: raise TypeError( "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP)) o = [] r = l while r > 85: o.append(_RFC1924_ALPHABET[r % 85]) r = r // 85 o.append(_RFC1924_ALPHABET[r]) return ''.join(reversed(o)).zfill(20)
python
def long2rfc1924(l): """Convert a network byte order 128-bit integer to an rfc1924 IPv6 address. >>> long2rfc1924(ip2long('1080::8:800:200C:417A')) '4)+k&C#VzJ4br>0wv%Yp' >>> long2rfc1924(ip2long('::')) '00000000000000000000' >>> long2rfc1924(MAX_IP) '=r54lj&NUUO~Hi%c2ym0' :param l: Network byte order 128-bit integer. :type l: int :returns: RFC 1924 IPv6 address :raises: TypeError """ if MAX_IP < l or l < MIN_IP: raise TypeError( "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP)) o = [] r = l while r > 85: o.append(_RFC1924_ALPHABET[r % 85]) r = r // 85 o.append(_RFC1924_ALPHABET[r]) return ''.join(reversed(o)).zfill(20)
[ "def", "long2rfc1924", "(", "l", ")", ":", "if", "MAX_IP", "<", "l", "or", "l", "<", "MIN_IP", ":", "raise", "TypeError", "(", "\"expected int between %d and %d inclusive\"", "%", "(", "MIN_IP", ",", "MAX_IP", ")", ")", "o", "=", "[", "]", "r", "=", "l", "while", "r", ">", "85", ":", "o", ".", "append", "(", "_RFC1924_ALPHABET", "[", "r", "%", "85", "]", ")", "r", "=", "r", "//", "85", "o", ".", "append", "(", "_RFC1924_ALPHABET", "[", "r", "]", ")", "return", "''", ".", "join", "(", "reversed", "(", "o", ")", ")", ".", "zfill", "(", "20", ")" ]
Convert a network byte order 128-bit integer to an rfc1924 IPv6 address. >>> long2rfc1924(ip2long('1080::8:800:200C:417A')) '4)+k&C#VzJ4br>0wv%Yp' >>> long2rfc1924(ip2long('::')) '00000000000000000000' >>> long2rfc1924(MAX_IP) '=r54lj&NUUO~Hi%c2ym0' :param l: Network byte order 128-bit integer. :type l: int :returns: RFC 1924 IPv6 address :raises: TypeError
[ "Convert", "a", "network", "byte", "order", "128", "-", "bit", "integer", "to", "an", "rfc1924", "IPv6", "address", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L357-L384
bd808/python-iptools
iptools/ipv6.py
rfc19242long
def rfc19242long(s): """Convert an RFC 1924 IPv6 address to a network byte order 128-bit integer. >>> expect = 0 >>> rfc19242long('00000000000000000000') == expect True >>> expect = 21932261930451111902915077091070067066 >>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect True >>> rfc19242long('pizza') == None True >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None True >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP True :param ip: RFC 1924 IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid. """ global _RFC1924_REV if not _RFC1924_RE.match(s): return None if _RFC1924_REV is None: _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)} x = 0 for c in s: x = x * 85 + _RFC1924_REV[c] if x > MAX_IP: return None return x
python
def rfc19242long(s): """Convert an RFC 1924 IPv6 address to a network byte order 128-bit integer. >>> expect = 0 >>> rfc19242long('00000000000000000000') == expect True >>> expect = 21932261930451111902915077091070067066 >>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect True >>> rfc19242long('pizza') == None True >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None True >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP True :param ip: RFC 1924 IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid. """ global _RFC1924_REV if not _RFC1924_RE.match(s): return None if _RFC1924_REV is None: _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)} x = 0 for c in s: x = x * 85 + _RFC1924_REV[c] if x > MAX_IP: return None return x
[ "def", "rfc19242long", "(", "s", ")", ":", "global", "_RFC1924_REV", "if", "not", "_RFC1924_RE", ".", "match", "(", "s", ")", ":", "return", "None", "if", "_RFC1924_REV", "is", "None", ":", "_RFC1924_REV", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "enumerate", "(", "_RFC1924_ALPHABET", ")", "}", "x", "=", "0", "for", "c", "in", "s", ":", "x", "=", "x", "*", "85", "+", "_RFC1924_REV", "[", "c", "]", "if", "x", ">", "MAX_IP", ":", "return", "None", "return", "x" ]
Convert an RFC 1924 IPv6 address to a network byte order 128-bit integer. >>> expect = 0 >>> rfc19242long('00000000000000000000') == expect True >>> expect = 21932261930451111902915077091070067066 >>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect True >>> rfc19242long('pizza') == None True >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None True >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP True :param ip: RFC 1924 IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
[ "Convert", "an", "RFC", "1924", "IPv6", "address", "to", "a", "network", "byte", "order", "128", "-", "bit", "integer", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L387-L420
bd808/python-iptools
iptools/ipv6.py
validate_cidr
def validate_cidr(s): """Validate a CIDR notation ip address. The string is considered a valid CIDR address if it consists of a valid IPv6 address in hextet format followed by a forward slash (/) and a bit mask length (0-128). >>> validate_cidr('::/128') True >>> validate_cidr('::/0') True >>> validate_cidr('fc00::/7') True >>> validate_cidr('::ffff:0:0/96') True >>> validate_cidr('::') False >>> validate_cidr('::/129') False >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected string or buffer :param s: String to validate as a CIDR notation ip address. :type s: str :returns: ``True`` if a valid CIDR address, ``False`` otherwise. :raises: TypeError """ if _CIDR_RE.match(s): ip, mask = s.split('/') if validate_ip(ip): if int(mask) > 128: return False else: return False return True return False
python
def validate_cidr(s): """Validate a CIDR notation ip address. The string is considered a valid CIDR address if it consists of a valid IPv6 address in hextet format followed by a forward slash (/) and a bit mask length (0-128). >>> validate_cidr('::/128') True >>> validate_cidr('::/0') True >>> validate_cidr('fc00::/7') True >>> validate_cidr('::ffff:0:0/96') True >>> validate_cidr('::') False >>> validate_cidr('::/129') False >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected string or buffer :param s: String to validate as a CIDR notation ip address. :type s: str :returns: ``True`` if a valid CIDR address, ``False`` otherwise. :raises: TypeError """ if _CIDR_RE.match(s): ip, mask = s.split('/') if validate_ip(ip): if int(mask) > 128: return False else: return False return True return False
[ "def", "validate_cidr", "(", "s", ")", ":", "if", "_CIDR_RE", ".", "match", "(", "s", ")", ":", "ip", ",", "mask", "=", "s", ".", "split", "(", "'/'", ")", "if", "validate_ip", "(", "ip", ")", ":", "if", "int", "(", "mask", ")", ">", "128", ":", "return", "False", "else", ":", "return", "False", "return", "True", "return", "False" ]
Validate a CIDR notation ip address. The string is considered a valid CIDR address if it consists of a valid IPv6 address in hextet format followed by a forward slash (/) and a bit mask length (0-128). >>> validate_cidr('::/128') True >>> validate_cidr('::/0') True >>> validate_cidr('fc00::/7') True >>> validate_cidr('::ffff:0:0/96') True >>> validate_cidr('::') False >>> validate_cidr('::/129') False >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected string or buffer :param s: String to validate as a CIDR notation ip address. :type s: str :returns: ``True`` if a valid CIDR address, ``False`` otherwise. :raises: TypeError
[ "Validate", "a", "CIDR", "notation", "ip", "address", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L423-L462
bd808/python-iptools
iptools/ipv6.py
cidr2block
def cidr2block(cidr): """Convert a CIDR notation ip address into a tuple containing the network block start and end addresses. >>> cidr2block('2001:db8::/48') ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff') >>> cidr2block('::/0') ('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') :param cidr: CIDR notation ip address (eg. '127.0.0.1/8'). :type cidr: str :returns: Tuple of block (start, end) or ``None`` if invalid. :raises: TypeError """ if not validate_cidr(cidr): return None ip, prefix = cidr.split('/') prefix = int(prefix) ip = ip2long(ip) # keep left most prefix bits of ip shift = 128 - prefix block_start = ip >> shift << shift # expand right most 128 - prefix bits to 1 mask = (1 << shift) - 1 block_end = block_start | mask return (long2ip(block_start), long2ip(block_end))
python
def cidr2block(cidr): """Convert a CIDR notation ip address into a tuple containing the network block start and end addresses. >>> cidr2block('2001:db8::/48') ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff') >>> cidr2block('::/0') ('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') :param cidr: CIDR notation ip address (eg. '127.0.0.1/8'). :type cidr: str :returns: Tuple of block (start, end) or ``None`` if invalid. :raises: TypeError """ if not validate_cidr(cidr): return None ip, prefix = cidr.split('/') prefix = int(prefix) ip = ip2long(ip) # keep left most prefix bits of ip shift = 128 - prefix block_start = ip >> shift << shift # expand right most 128 - prefix bits to 1 mask = (1 << shift) - 1 block_end = block_start | mask return (long2ip(block_start), long2ip(block_end))
[ "def", "cidr2block", "(", "cidr", ")", ":", "if", "not", "validate_cidr", "(", "cidr", ")", ":", "return", "None", "ip", ",", "prefix", "=", "cidr", ".", "split", "(", "'/'", ")", "prefix", "=", "int", "(", "prefix", ")", "ip", "=", "ip2long", "(", "ip", ")", "# keep left most prefix bits of ip", "shift", "=", "128", "-", "prefix", "block_start", "=", "ip", ">>", "shift", "<<", "shift", "# expand right most 128 - prefix bits to 1", "mask", "=", "(", "1", "<<", "shift", ")", "-", "1", "block_end", "=", "block_start", "|", "mask", "return", "(", "long2ip", "(", "block_start", ")", ",", "long2ip", "(", "block_end", ")", ")" ]
Convert a CIDR notation ip address into a tuple containing the network block start and end addresses. >>> cidr2block('2001:db8::/48') ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff') >>> cidr2block('::/0') ('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') :param cidr: CIDR notation ip address (eg. '127.0.0.1/8'). :type cidr: str :returns: Tuple of block (start, end) or ``None`` if invalid. :raises: TypeError
[ "Convert", "a", "CIDR", "notation", "ip", "address", "into", "a", "tuple", "containing", "the", "network", "block", "start", "and", "end", "addresses", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L466-L496
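As a usage illustration (not one of the dataset rows above): the IPv6 helpers documented in the preceding bd808/python-iptools records compose naturally. The sketch below assumes the package is installed and exposes these functions through an iptools.ipv6 module, as the file path iptools/ipv6.py suggests; the sample address and the expected outputs mirror the doctests shown in the records.

    # Minimal sketch; the import path is an assumption based on the file path iptools/ipv6.py.
    from iptools import ipv6

    cidr = '2001:db8::/48'
    if ipv6.validate_cidr(cidr):
        start, end = ipv6.cidr2block(cidr)          # ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
        as_int = ipv6.ip2long(start)                # network byte order 128-bit integer
        print(ipv6.long2ip(as_int))                 # '2001:db8::'
        print(ipv6.long2ip(as_int, rfc1924=True))   # same address in RFC 1924 (base 85) notation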
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
parse_input_samples
def parse_input_samples(job, inputs): """ Parses config file to pull sample information. Stores samples as tuples of (uuid, URL) :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) """ job.fileStore.logToMaster('Parsing input samples and batching jobs') samples = [] if inputs.config: with open(inputs.config, 'r') as f: for line in f.readlines(): if not line.isspace(): sample = line.strip().split(',') assert len(sample) == 2, 'Error: Config file is inappropriately formatted.' samples.append(sample) job.addChildJobFn(map_job, download_sample, samples, inputs)
python
def parse_input_samples(job, inputs): """ Parses config file to pull sample information. Stores samples as tuples of (uuid, URL) :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) """ job.fileStore.logToMaster('Parsing input samples and batching jobs') samples = [] if inputs.config: with open(inputs.config, 'r') as f: for line in f.readlines(): if not line.isspace(): sample = line.strip().split(',') assert len(sample) == 2, 'Error: Config file is inappropriately formatted.' samples.append(sample) job.addChildJobFn(map_job, download_sample, samples, inputs)
[ "def", "parse_input_samples", "(", "job", ",", "inputs", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Parsing input samples and batching jobs'", ")", "samples", "=", "[", "]", "if", "inputs", ".", "config", ":", "with", "open", "(", "inputs", ".", "config", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "not", "line", ".", "isspace", "(", ")", ":", "sample", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "assert", "len", "(", "sample", ")", "==", "2", ",", "'Error: Config file is inappropriately formatted.'", "samples", ".", "append", "(", "sample", ")", "job", ".", "addChildJobFn", "(", "map_job", ",", "download_sample", ",", "samples", ",", "inputs", ")" ]
Parses config file to pull sample information. Stores samples as tuples of (uuid, URL) :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main)
[ "Parses", "config", "file", "to", "pull", "sample", "information", ".", "Stores", "samples", "as", "tuples", "of", "(", "uuid", "URL", ")" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L43-L60
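For context on parse_input_samples above: the config file it reads holds one sample per non-blank line in the form uuid,URL (the code splits on a comma and asserts exactly two fields). A minimal sketch with hypothetical sample names and URLs:

    # Hypothetical config line; any uuid,URL pair in this shape would parse the same way.
    line = 'sample-1,https://example.com/rnaseq/sample-1.tar.gz\n'
    sample = line.strip().split(',')
    assert len(sample) == 2      # the same formatting check parse_input_samples performs
    uuid, url = sample           # ('sample-1', 'https://example.com/rnaseq/sample-1.tar.gz')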
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
download_sample
def download_sample(job, sample, inputs): """ Download the input sample :param JobFunctionWrappingJob job: passed by Toil automatically :param tuple sample: Tuple containing (UUID,URL) of a sample :param Namespace inputs: Stores input arguments (see main) """ uuid, url = sample job.fileStore.logToMaster('Downloading sample: {}'.format(uuid)) # Download sample tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv() # Create copy of inputs for each sample sample_inputs = argparse.Namespace(**vars(inputs)) sample_inputs.uuid = uuid sample_inputs.cores = multiprocessing.cpu_count() # Call children and follow-on jobs job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G')
python
def download_sample(job, sample, inputs): """ Download the input sample :param JobFunctionWrappingJob job: passed by Toil automatically :param tuple sample: Tuple containing (UUID,URL) of a sample :param Namespace inputs: Stores input arguments (see main) """ uuid, url = sample job.fileStore.logToMaster('Downloading sample: {}'.format(uuid)) # Download sample tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv() # Create copy of inputs for each sample sample_inputs = argparse.Namespace(**vars(inputs)) sample_inputs.uuid = uuid sample_inputs.cores = multiprocessing.cpu_count() # Call children and follow-on jobs job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G')
[ "def", "download_sample", "(", "job", ",", "sample", ",", "inputs", ")", ":", "uuid", ",", "url", "=", "sample", "job", ".", "fileStore", ".", "logToMaster", "(", "'Downloading sample: {}'", ".", "format", "(", "uuid", ")", ")", "# Download sample", "tar_id", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "url", ",", "s3_key_path", "=", "inputs", ".", "ssec", ",", "disk", "=", "'30G'", ")", ".", "rv", "(", ")", "# Create copy of inputs for each sample", "sample_inputs", "=", "argparse", ".", "Namespace", "(", "*", "*", "vars", "(", "inputs", ")", ")", "sample_inputs", ".", "uuid", "=", "uuid", "sample_inputs", ".", "cores", "=", "multiprocessing", ".", "cpu_count", "(", ")", "# Call children and follow-on jobs", "job", ".", "addFollowOnJobFn", "(", "process_sample", ",", "sample_inputs", ",", "tar_id", ",", "cores", "=", "2", ",", "disk", "=", "'60G'", ")" ]
Download the input sample :param JobFunctionWrappingJob job: passed by Toil automatically :param tuple sample: Tuple containing (UUID,URL) of a sample :param Namespace inputs: Stores input arguments (see main)
[ "Download", "the", "input", "sample" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L63-L80
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
process_sample
def process_sample(job, inputs, tar_id): """ Converts sample.tar(.gz) into two fastq files. Due to edge conditions... BEWARE: HERE BE DRAGONS :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str tar_id: FileStore ID of sample tar """ job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() # I/O tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar')) # Untar File and concat subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir]) os.remove(os.path.join(work_dir, 'sample.tar')) # Grab files from tarball fastqs = [] for root, subdir, files in os.walk(work_dir): fastqs.extend([os.path.join(root, x) for x in files]) # Check for read 1 and read 2 files r1 = sorted([x for x in fastqs if 'R1' in x]) r2 = sorted([x for x in fastqs if 'R2' in x]) if not r1 or not r2: # Check if using a different standard r1 = sorted([x for x in fastqs if '_1' in x]) r2 = sorted([x for x in fastqs if '_2' in x]) # Prune file name matches from each list if len(r1) > len(r2): r1 = [x for x in r1 if x not in r2] elif len(r2) > len(r1): r2 = [x for x in r2 if x not in r1] # Flag if data is single-ended assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\nR2:{}'.format(r1, r2) command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat' with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen([command] + r1, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen([command] + r2, stdout=f2) p1.wait() p2.wait() # Write to fileStore r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(tar_id) # Start cutadapt step job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()
python
def process_sample(job, inputs, tar_id): """ Converts sample.tar(.gz) into two fastq files. Due to edge conditions... BEWARE: HERE BE DRAGONS :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str tar_id: FileStore ID of sample tar """ job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() # I/O tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar')) # Untar File and concat subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir]) os.remove(os.path.join(work_dir, 'sample.tar')) # Grab files from tarball fastqs = [] for root, subdir, files in os.walk(work_dir): fastqs.extend([os.path.join(root, x) for x in files]) # Check for read 1 and read 2 files r1 = sorted([x for x in fastqs if 'R1' in x]) r2 = sorted([x for x in fastqs if 'R2' in x]) if not r1 or not r2: # Check if using a different standard r1 = sorted([x for x in fastqs if '_1' in x]) r2 = sorted([x for x in fastqs if '_2' in x]) # Prune file name matches from each list if len(r1) > len(r2): r1 = [x for x in r1 if x not in r2] elif len(r2) > len(r1): r2 = [x for x in r2 if x not in r1] # Flag if data is single-ended assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\nR2:{}'.format(r1, r2) command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat' with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen([command] + r1, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen([command] + r2, stdout=f2) p1.wait() p2.wait() # Write to fileStore r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(tar_id) # Start cutadapt step job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()
[ "def", "process_sample", "(", "job", ",", "inputs", ",", "tar_id", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Processing sample into read pairs: {}'", ".", "format", "(", "inputs", ".", "uuid", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "# I/O", "tar_path", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "tar_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sample.tar'", ")", ")", "# Untar File and concat", "subprocess", ".", "check_call", "(", "[", "'tar'", ",", "'-xvf'", ",", "tar_path", ",", "'-C'", ",", "work_dir", "]", ")", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'sample.tar'", ")", ")", "# Grab files from tarball", "fastqs", "=", "[", "]", "for", "root", ",", "subdir", ",", "files", "in", "os", ".", "walk", "(", "work_dir", ")", ":", "fastqs", ".", "extend", "(", "[", "os", ".", "path", ".", "join", "(", "root", ",", "x", ")", "for", "x", "in", "files", "]", ")", "# Check for read 1 and read 2 files", "r1", "=", "sorted", "(", "[", "x", "for", "x", "in", "fastqs", "if", "'R1'", "in", "x", "]", ")", "r2", "=", "sorted", "(", "[", "x", "for", "x", "in", "fastqs", "if", "'R2'", "in", "x", "]", ")", "if", "not", "r1", "or", "not", "r2", ":", "# Check if using a different standard", "r1", "=", "sorted", "(", "[", "x", "for", "x", "in", "fastqs", "if", "'_1'", "in", "x", "]", ")", "r2", "=", "sorted", "(", "[", "x", "for", "x", "in", "fastqs", "if", "'_2'", "in", "x", "]", ")", "# Prune file name matches from each list", "if", "len", "(", "r1", ")", ">", "len", "(", "r2", ")", ":", "r1", "=", "[", "x", "for", "x", "in", "r1", "if", "x", "not", "in", "r2", "]", "elif", "len", "(", "r2", ")", ">", "len", "(", "r1", ")", ":", "r2", "=", "[", "x", "for", "x", "in", "r2", "if", "x", "not", "in", "r1", "]", "# Flag if data is single-ended", "assert", "r1", "and", "r2", ",", "'This pipeline does not support single-ended data. R1: {}\\nR2:{}'", ".", "format", "(", "r1", ",", "r2", ")", "command", "=", "'zcat'", "if", "r1", "[", "0", "]", ".", "endswith", "(", "'gz'", ")", "and", "r2", "[", "0", "]", ".", "endswith", "(", "'gz'", ")", "else", "'cat'", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ",", "'w'", ")", "as", "f1", ":", "p1", "=", "subprocess", ".", "Popen", "(", "[", "command", "]", "+", "r1", ",", "stdout", "=", "f1", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2.fastq'", ")", ",", "'w'", ")", "as", "f2", ":", "p2", "=", "subprocess", ".", "Popen", "(", "[", "command", "]", "+", "r2", ",", "stdout", "=", "f2", ")", "p1", ".", "wait", "(", ")", "p2", ".", "wait", "(", ")", "# Write to fileStore", "r1_id", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ")", "r2_id", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2.fastq'", ")", ")", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "tar_id", ")", "# Start cutadapt step", "job", ".", "addChildJobFn", "(", "cutadapt", ",", "inputs", ",", "r1_id", ",", "r2_id", ",", "disk", "=", "'60G'", ")", ".", "rv", "(", ")" ]
Converts sample.tar(.gz) into two fastq files. Due to edge conditions... BEWARE: HERE BE DRAGONS :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str tar_id: FileStore ID of sample tar
[ "Converts", "sample", ".", "tar", "(", ".", "gz", ")", "into", "two", "fastq", "files", ".", "Due", "to", "edge", "conditions", "...", "BEWARE", ":", "HERE", "BE", "DRAGONS" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L83-L129
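To make the read-pairing logic in process_sample above concrete, here is how its R1/R2 detection behaves on a hypothetical file listing (file names invented for the example):

    fastqs = ['s_1.fastq.gz', 's_2.fastq.gz']        # hypothetical names
    r1 = sorted(x for x in fastqs if 'R1' in x)      # [] -> R1/R2 convention not found
    r2 = sorted(x for x in fastqs if 'R2' in x)
    if not r1 or not r2:                             # fall back to the _1/_2 convention
        r1 = sorted(x for x in fastqs if '_1' in x)  # ['s_1.fastq.gz']
        r2 = sorted(x for x in fastqs if '_2' in x)  # ['s_2.fastq.gz']
    assert r1 and r2                                 # single-ended data would fail here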
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
cutadapt
def cutadapt(job, inputs, r1_id, r2_id): """ Filters out adapters that may be left in the RNA-seq files :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_id: FileStore ID of read 1 fastq :param str r2_id: FileStore ID of read 2 fastq """ job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() inputs.improper_pair = None # Retrieve files job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq')) job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq')) # Cutadapt parameters parameters = ['-a', inputs.fwd_3pr_adapter, '-m', '35', '-A', inputs.rev_3pr_adapter, '-o', '/data/R1_cutadapt.fastq', '-p', '/data/R2_cutadapt.fastq', '/data/R1.fastq', '/data/R2.fastq'] # Call: CutAdapt base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split() if inputs.sudo: base_docker_call = ['sudo'] + base_docker_call tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2' p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=subprocess.PIPE, stdout=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: if 'improperly paired' in stderr: inputs.improper_pair = True shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq')) shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq')) # Write to fileStore if inputs.improper_pair: r1_cutadapt = r1_id r2_cutadapt = r2_id else: r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq')) r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq')) job.fileStore.deleteGlobalFile(r1_id) job.fileStore.deleteGlobalFile(r2_id) # start STAR cores = min(inputs.cores, 16) job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores, disk='100G', memory='40G').rv()
python
def cutadapt(job, inputs, r1_id, r2_id): """ Filters out adapters that may be left in the RNA-seq files :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_id: FileStore ID of read 1 fastq :param str r2_id: FileStore ID of read 2 fastq """ job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() inputs.improper_pair = None # Retrieve files job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq')) job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq')) # Cutadapt parameters parameters = ['-a', inputs.fwd_3pr_adapter, '-m', '35', '-A', inputs.rev_3pr_adapter, '-o', '/data/R1_cutadapt.fastq', '-p', '/data/R2_cutadapt.fastq', '/data/R1.fastq', '/data/R2.fastq'] # Call: CutAdapt base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split() if inputs.sudo: base_docker_call = ['sudo'] + base_docker_call tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2' p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=subprocess.PIPE, stdout=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: if 'improperly paired' in stderr: inputs.improper_pair = True shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq')) shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq')) # Write to fileStore if inputs.improper_pair: r1_cutadapt = r1_id r2_cutadapt = r2_id else: r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq')) r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq')) job.fileStore.deleteGlobalFile(r1_id) job.fileStore.deleteGlobalFile(r2_id) # start STAR cores = min(inputs.cores, 16) job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores, disk='100G', memory='40G').rv()
[ "def", "cutadapt", "(", "job", ",", "inputs", ",", "r1_id", ",", "r2_id", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Running CutAdapt: {}'", ".", "format", "(", "inputs", ".", "uuid", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "inputs", ".", "improper_pair", "=", "None", "# Retrieve files", "job", ".", "fileStore", ".", "readGlobalFile", "(", "r1_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ")", "job", ".", "fileStore", ".", "readGlobalFile", "(", "r2_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2.fastq'", ")", ")", "# Cutadapt parameters", "parameters", "=", "[", "'-a'", ",", "inputs", ".", "fwd_3pr_adapter", ",", "'-m'", ",", "'35'", ",", "'-A'", ",", "inputs", ".", "rev_3pr_adapter", ",", "'-o'", ",", "'/data/R1_cutadapt.fastq'", ",", "'-p'", ",", "'/data/R2_cutadapt.fastq'", ",", "'/data/R1.fastq'", ",", "'/data/R2.fastq'", "]", "# Call: CutAdapt", "base_docker_call", "=", "'docker run --log-driver=none --rm -v {}:/data'", ".", "format", "(", "work_dir", ")", ".", "split", "(", ")", "if", "inputs", ".", "sudo", ":", "base_docker_call", "=", "[", "'sudo'", "]", "+", "base_docker_call", "tool", "=", "'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'", "p", "=", "subprocess", ".", "Popen", "(", "base_docker_call", "+", "[", "tool", "]", "+", "parameters", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", "!=", "0", ":", "if", "'improperly paired'", "in", "stderr", ":", "inputs", ".", "improper_pair", "=", "True", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1.fastq'", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1_cutadapt.fastq'", ")", ")", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2.fastq'", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2_cutadapt.fastq'", ")", ")", "# Write to fileStore", "if", "inputs", ".", "improper_pair", ":", "r1_cutadapt", "=", "r1_id", "r2_cutadapt", "=", "r2_id", "else", ":", "r1_cutadapt", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1_cutadapt.fastq'", ")", ")", "r2_cutadapt", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2_cutadapt.fastq'", ")", ")", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "r1_id", ")", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "r2_id", ")", "# start STAR", "cores", "=", "min", "(", "inputs", ".", "cores", ",", "16", ")", "job", ".", "addChildJobFn", "(", "star", ",", "inputs", ",", "r1_cutadapt", ",", "r2_cutadapt", ",", "cores", "=", "cores", ",", "disk", "=", "'100G'", ",", "memory", "=", "'40G'", ")", ".", "rv", "(", ")" ]
Filters out adapters that may be left in the RNA-seq files :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_id: FileStore ID of read 1 fastq :param str r2_id: FileStore ID of read 2 fastq
[ "Filters", "out", "adapters", "that", "may", "be", "left", "in", "the", "RNA", "-", "seq", "files" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L132-L177
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
star
def star(job, inputs, r1_cutadapt, r2_cutadapt): """ Performs alignment of fastqs to BAM via STAR :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_cutadapt: FileStore ID of read 1 fastq :param str r2_cutadapt: FileStore ID of read 2 fastq """ job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() cores = min(inputs.cores, 16) # Retrieve files job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir, 'R1_cutadapt.fastq')) job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir, 'R2_cutadapt.fastq')) # Get starIndex download_url(job=job, url=inputs.star_index, work_dir=work_dir, name='starIndex.tar.gz') subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir]) # Parameters parameters = ['--runThreadN', str(cores), '--genomeDir', '/data/starIndex', '--outFileNamePrefix', 'rna', '--outSAMtype', 'BAM', 'SortedByCoordinate', '--outSAMunmapped', 'Within', '--quantMode', 'TranscriptomeSAM', '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD', '--outFilterType', 'BySJout', '--outFilterMultimapNmax', '20', '--outFilterMismatchNmax', '999', '--outFilterMismatchNoverReadLmax', '0.04', '--alignIntronMin', '20', '--alignIntronMax', '1000000', '--alignMatesGapMax', '1000000', '--alignSJoverhangMin', '8', '--alignSJDBoverhangMin', '1', '--sjdbScore', '1', '--readFilesIn', '/data/R1_cutadapt.fastq', '/data/R2_cutadapt.fastq'] # Call: STAR Map docker_call(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80', work_dir=work_dir, parameters=parameters) # Call Samtools Index index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam'] docker_call(job=job, work_dir=work_dir, parameters=index_command, tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c') # fileStore bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam')) bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam.bai')) job.fileStore.deleteGlobalFile(r1_cutadapt) job.fileStore.deleteGlobalFile(r2_cutadapt) # Launch children and follow-on vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id, bai_id, cores=2, disk='30G').rv() spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk='30G').rv() job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id, spladder_id, disk='30G')
python
def star(job, inputs, r1_cutadapt, r2_cutadapt): """ Performs alignment of fastqs to BAM via STAR :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_cutadapt: FileStore ID of read 1 fastq :param str r2_cutadapt: FileStore ID of read 2 fastq """ job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() cores = min(inputs.cores, 16) # Retrieve files job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir, 'R1_cutadapt.fastq')) job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir, 'R2_cutadapt.fastq')) # Get starIndex download_url(job=job, url=inputs.star_index, work_dir=work_dir, name='starIndex.tar.gz') subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir]) # Parameters parameters = ['--runThreadN', str(cores), '--genomeDir', '/data/starIndex', '--outFileNamePrefix', 'rna', '--outSAMtype', 'BAM', 'SortedByCoordinate', '--outSAMunmapped', 'Within', '--quantMode', 'TranscriptomeSAM', '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD', '--outFilterType', 'BySJout', '--outFilterMultimapNmax', '20', '--outFilterMismatchNmax', '999', '--outFilterMismatchNoverReadLmax', '0.04', '--alignIntronMin', '20', '--alignIntronMax', '1000000', '--alignMatesGapMax', '1000000', '--alignSJoverhangMin', '8', '--alignSJDBoverhangMin', '1', '--sjdbScore', '1', '--readFilesIn', '/data/R1_cutadapt.fastq', '/data/R2_cutadapt.fastq'] # Call: STAR Map docker_call(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80', work_dir=work_dir, parameters=parameters) # Call Samtools Index index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam'] docker_call(job=job, work_dir=work_dir, parameters=index_command, tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c') # fileStore bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam')) bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam.bai')) job.fileStore.deleteGlobalFile(r1_cutadapt) job.fileStore.deleteGlobalFile(r2_cutadapt) # Launch children and follow-on vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id, bai_id, cores=2, disk='30G').rv() spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk='30G').rv() job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id, spladder_id, disk='30G')
[ "def", "star", "(", "job", ",", "inputs", ",", "r1_cutadapt", ",", "r2_cutadapt", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Aligning with STAR: {}'", ".", "format", "(", "inputs", ".", "uuid", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "cores", "=", "min", "(", "inputs", ".", "cores", ",", "16", ")", "# Retrieve files", "job", ".", "fileStore", ".", "readGlobalFile", "(", "r1_cutadapt", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R1_cutadapt.fastq'", ")", ")", "job", ".", "fileStore", ".", "readGlobalFile", "(", "r2_cutadapt", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'R2_cutadapt.fastq'", ")", ")", "# Get starIndex", "download_url", "(", "job", "=", "job", ",", "url", "=", "inputs", ".", "star_index", ",", "work_dir", "=", "work_dir", ",", "name", "=", "'starIndex.tar.gz'", ")", "subprocess", ".", "check_call", "(", "[", "'tar'", ",", "'-xvf'", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'starIndex.tar.gz'", ")", ",", "'-C'", ",", "work_dir", "]", ")", "# Parameters", "parameters", "=", "[", "'--runThreadN'", ",", "str", "(", "cores", ")", ",", "'--genomeDir'", ",", "'/data/starIndex'", ",", "'--outFileNamePrefix'", ",", "'rna'", ",", "'--outSAMtype'", ",", "'BAM'", ",", "'SortedByCoordinate'", ",", "'--outSAMunmapped'", ",", "'Within'", ",", "'--quantMode'", ",", "'TranscriptomeSAM'", ",", "'--outSAMattributes'", ",", "'NH'", ",", "'HI'", ",", "'AS'", ",", "'NM'", ",", "'MD'", ",", "'--outFilterType'", ",", "'BySJout'", ",", "'--outFilterMultimapNmax'", ",", "'20'", ",", "'--outFilterMismatchNmax'", ",", "'999'", ",", "'--outFilterMismatchNoverReadLmax'", ",", "'0.04'", ",", "'--alignIntronMin'", ",", "'20'", ",", "'--alignIntronMax'", ",", "'1000000'", ",", "'--alignMatesGapMax'", ",", "'1000000'", ",", "'--alignSJoverhangMin'", ",", "'8'", ",", "'--alignSJDBoverhangMin'", ",", "'1'", ",", "'--sjdbScore'", ",", "'1'", ",", "'--readFilesIn'", ",", "'/data/R1_cutadapt.fastq'", ",", "'/data/R2_cutadapt.fastq'", "]", "# Call: STAR Map", "docker_call", "(", "job", "=", "job", ",", "tool", "=", "'quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80'", ",", "work_dir", "=", "work_dir", ",", "parameters", "=", "parameters", ")", "# Call Samtools Index", "index_command", "=", "[", "'index'", ",", "'/data/rnaAligned.sortedByCoord.out.bam'", "]", "docker_call", "(", "job", "=", "job", ",", "work_dir", "=", "work_dir", ",", "parameters", "=", "index_command", ",", "tool", "=", "'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c'", ")", "# fileStore", "bam_id", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rnaAligned.sortedByCoord.out.bam'", ")", ")", "bai_id", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'rnaAligned.sortedByCoord.out.bam.bai'", ")", ")", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "r1_cutadapt", ")", "job", ".", "fileStore", ".", "deleteGlobalFile", "(", "r2_cutadapt", ")", "# Launch children and follow-on", "vcqc_id", "=", "job", ".", "addChildJobFn", "(", "variant_calling_and_qc", ",", "inputs", ",", "bam_id", ",", "bai_id", ",", "cores", "=", "2", ",", "disk", "=", "'30G'", ")", ".", "rv", "(", ")", "spladder_id", "=", "job", ".", "addChildJobFn", "(", "spladder", ",", "inputs", ",", "bam_id", ",", "bai_id", ",", "disk", "=", "'30G'", ")", ".", "rv", "(", ")", "job", ".", 
"addFollowOnJobFn", "(", "consolidate_output_tarballs", ",", "inputs", ",", "vcqc_id", ",", "spladder_id", ",", "disk", "=", "'30G'", ")" ]
Performs alignment of fastqs to BAM via STAR

:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_cutadapt: FileStore ID of read 1 fastq
:param str r2_cutadapt: FileStore ID of read 2 fastq
[ "Performs", "alignment", "of", "fastqs", "to", "BAM", "via", "STAR" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L180-L232
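For orientation, a minimal sketch of how the star job above might be scheduled from an upstream Toil job. The trimming step that produces the two FileStore IDs is not part of this record, so the parent function name, the r1_id/r2_id names, and the resource figures below are assumptions for illustration only.

# Hypothetical parent job (not from the repository): hands trimmed fastq IDs to star().
def after_cutadapt(job, inputs, r1_id, r2_id):
    # r1_id / r2_id are assumed to be FileStore IDs written by an upstream cutadapt step
    job.addChildJobFn(star, inputs, r1_id, r2_id,
                      cores=inputs.cores, disk='100G')  # disk figure is illustrative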
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
variant_calling_and_qc
def variant_calling_and_qc(job, inputs, bam_id, bai_id):
    """
    Perform variant calling with samtools and QC with CheckBias

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str bam_id: FileStore ID of bam
    :param str bai_id: FileStore ID of bam index file
    :return: FileStore ID of qc tarball
    :rtype: str
    """
    job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input files
    input_info = [(inputs.genome, 'genome.fa'), (inputs.positions, 'positions.tsv'),
                  (inputs.genome_index, 'genome.fa.fai'), (inputs.gtf, 'annotation.gtf'),
                  (inputs.gtf_m53, 'annotation.m53')]
    for url, fname in input_info:
        download_url(job=job, url=url, work_dir=work_dir, name=fname)
    # Part 1: Variant Calling
    variant_command = ['mpileup',
                       '-f', 'genome.fa',
                       '-l', 'positions.tsv',
                       '-v', 'alignment.bam',
                       '-t', 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP',
                       '-o', '/data/output.vcf.gz']
    docker_call(job=job, work_dir=work_dir, parameters=variant_command,
                tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
    # Part 2: QC
    qc_command = ['-o', 'qc',
                  '-n', 'alignment.bam',
                  '-a', 'annotation.gtf',
                  '-m', 'annotation.m53']
    docker_call(job=job, work_dir=work_dir, parameters=qc_command,
                tool='jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')
    # Write output to fileStore and return ids
    output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]
    output_vcf = os.path.join(work_dir, 'output.vcf.gz')
    tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz'))
python
def variant_calling_and_qc(job, inputs, bam_id, bai_id):
    """
    Perform variant calling with samtools and QC with CheckBias

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str bam_id: FileStore ID of bam
    :param str bai_id: FileStore ID of bam index file
    :return: FileStore ID of qc tarball
    :rtype: str
    """
    job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input files
    input_info = [(inputs.genome, 'genome.fa'), (inputs.positions, 'positions.tsv'),
                  (inputs.genome_index, 'genome.fa.fai'), (inputs.gtf, 'annotation.gtf'),
                  (inputs.gtf_m53, 'annotation.m53')]
    for url, fname in input_info:
        download_url(job=job, url=url, work_dir=work_dir, name=fname)
    # Part 1: Variant Calling
    variant_command = ['mpileup',
                       '-f', 'genome.fa',
                       '-l', 'positions.tsv',
                       '-v', 'alignment.bam',
                       '-t', 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP',
                       '-o', '/data/output.vcf.gz']
    docker_call(job=job, work_dir=work_dir, parameters=variant_command,
                tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
    # Part 2: QC
    qc_command = ['-o', 'qc',
                  '-n', 'alignment.bam',
                  '-a', 'annotation.gtf',
                  '-m', 'annotation.m53']
    docker_call(job=job, work_dir=work_dir, parameters=qc_command,
                tool='jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')
    # Write output to fileStore and return ids
    output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]
    output_vcf = os.path.join(work_dir, 'output.vcf.gz')
    tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz'))
[ "def", "variant_calling_and_qc", "(", "job", ",", "inputs", ",", "bam_id", ",", "bai_id", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Variant calling and QC: {}'", ".", "format", "(", "inputs", ".", "uuid", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "# Pull in alignment.bam from fileStore", "job", ".", "fileStore", ".", "readGlobalFile", "(", "bam_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignment.bam'", ")", ")", "job", ".", "fileStore", ".", "readGlobalFile", "(", "bai_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignment.bam.bai'", ")", ")", "# Download input files", "input_info", "=", "[", "(", "inputs", ".", "genome", ",", "'genome.fa'", ")", ",", "(", "inputs", ".", "positions", ",", "'positions.tsv'", ")", ",", "(", "inputs", ".", "genome_index", ",", "'genome.fa.fai'", ")", ",", "(", "inputs", ".", "gtf", ",", "'annotation.gtf'", ")", ",", "(", "inputs", ".", "gtf_m53", ",", "'annotation.m53'", ")", "]", "for", "url", ",", "fname", "in", "input_info", ":", "download_url", "(", "job", "=", "job", ",", "url", "=", "url", ",", "work_dir", "=", "work_dir", ",", "name", "=", "fname", ")", "# Part 1: Variant Calling", "variant_command", "=", "[", "'mpileup'", ",", "'-f'", ",", "'genome.fa'", ",", "'-l'", ",", "'positions.tsv'", ",", "'-v'", ",", "'alignment.bam'", ",", "'-t'", ",", "'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP'", ",", "'-o'", ",", "'/data/output.vcf.gz'", "]", "docker_call", "(", "job", "=", "job", ",", "work_dir", "=", "work_dir", ",", "parameters", "=", "variant_command", ",", "tool", "=", "'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c'", ")", "# Part 2: QC", "qc_command", "=", "[", "'-o'", ",", "'qc'", ",", "'-n'", ",", "'alignment.bam'", ",", "'-a'", ",", "'annotation.gtf'", ",", "'-m'", ",", "'annotation.m53'", "]", "docker_call", "(", "job", "=", "job", ",", "work_dir", "=", "work_dir", ",", "parameters", "=", "qc_command", ",", "tool", "=", "'jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3'", ")", "# Write output to fileStore and return ids", "output_tsv", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'*counts.tsv*'", ")", ")", "[", "0", "]", "output_vcf", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'output.vcf.gz'", ")", "tarball_files", "(", "'vcqc.tar.gz'", ",", "file_paths", "=", "[", "output_tsv", ",", "output_vcf", "]", ",", "output_dir", "=", "work_dir", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'vcqc.tar.gz'", ")", ")" ]
Perform variant calling with samtools and QC with CheckBias

:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of qc tarball
:rtype: str
[ "Perform", "variant", "calling", "with", "samtools", "nad", "QC", "with", "CheckBias" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L235-L279
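As a usage note, the function returns the FileStore ID of a tarball; a small follow-on job could export it locally for inspection. The sketch below uses only the standard Toil fileStore calls already shown in the record, but the helper name and its destination argument are my own and are not part of the pipeline.

# Hypothetical helper (not from the repository): copy the vcqc tarball to a local directory.
import os
import shutil

def export_vcqc(job, vcqc_id, dest_dir):
    work_dir = job.fileStore.getLocalTempDir()
    # readGlobalFile returns the local path it wrote the file to
    local_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
    shutil.copy(local_tar, os.path.join(dest_dir, 'vcqc.tar.gz'))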
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
spladder
def spladder(job, inputs, bam_id, bai_id):
    """
    Run SplAdder to detect and quantify alternative splicing events

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str bam_id: FileStore ID of bam
    :param str bai_id: FileStore ID of bam index file
    :return: FileStore ID of SplAdder tarball
    :rtype: str
    """
    job.fileStore.logToMaster('SplAdder: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input file
    download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf')
    download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle')
    # Call Spladder
    command = ['--insert_ir=y',
               '--insert_es=y',
               '--insert_ni=y',
               '--remove_se=n',
               '--validate_sg=n',
               '-b', 'alignment.bam',
               '-o ', '/data',
               '-a', 'annotation.gtf',
               '-v', 'y',
               '-c', '3',
               '-M', 'single',
               '-T', 'n',
               '-n', '50',
               '-P', 'y',
               '-p', 'n',
               '--sparse_bam', 'y']
    docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo, tool='jvivian/spladder:1.0')
    # Write output to fileStore and return ids
    output_pickle = os.path.join(work_dir, ' ', 'spladder', 'genes_graph_conf3.alignment.pickle')
    if not os.path.exists(output_pickle):
        matches = []
        for root, dirnames, filenames in os.walk(work_dir):
            for filename in fnmatch.filter(filenames, '*genes_graph*'):
                matches.append(os.path.join(root, filename))
        if matches:
            output_pickle = matches[0]
        else:
            raise RuntimeError("Couldn't find genes file!")
    output_filt = os.path.join(work_dir, 'alignment.filt.hdf5')
    output = os.path.join(work_dir, 'alignment.hdf5')
    print os.listdir(work_dir)
    tarball_files('spladder.tar.gz', file_paths=[output_pickle, output_filt, output], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz'))
python
def spladder(job, inputs, bam_id, bai_id):
    """
    Run SplAdder to detect and quantify alternative splicing events

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str bam_id: FileStore ID of bam
    :param str bai_id: FileStore ID of bam index file
    :return: FileStore ID of SplAdder tarball
    :rtype: str
    """
    job.fileStore.logToMaster('SplAdder: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input file
    download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf')
    download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle')
    # Call Spladder
    command = ['--insert_ir=y',
               '--insert_es=y',
               '--insert_ni=y',
               '--remove_se=n',
               '--validate_sg=n',
               '-b', 'alignment.bam',
               '-o ', '/data',
               '-a', 'annotation.gtf',
               '-v', 'y',
               '-c', '3',
               '-M', 'single',
               '-T', 'n',
               '-n', '50',
               '-P', 'y',
               '-p', 'n',
               '--sparse_bam', 'y']
    docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo, tool='jvivian/spladder:1.0')
    # Write output to fileStore and return ids
    output_pickle = os.path.join(work_dir, ' ', 'spladder', 'genes_graph_conf3.alignment.pickle')
    if not os.path.exists(output_pickle):
        matches = []
        for root, dirnames, filenames in os.walk(work_dir):
            for filename in fnmatch.filter(filenames, '*genes_graph*'):
                matches.append(os.path.join(root, filename))
        if matches:
            output_pickle = matches[0]
        else:
            raise RuntimeError("Couldn't find genes file!")
    output_filt = os.path.join(work_dir, 'alignment.filt.hdf5')
    output = os.path.join(work_dir, 'alignment.hdf5')
    print os.listdir(work_dir)
    tarball_files('spladder.tar.gz', file_paths=[output_pickle, output_filt, output], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz'))
[ "def", "spladder", "(", "job", ",", "inputs", ",", "bam_id", ",", "bai_id", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'SplAdder: {}'", ".", "format", "(", "inputs", ".", "uuid", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "# Pull in alignment.bam from fileStore", "job", ".", "fileStore", ".", "readGlobalFile", "(", "bam_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignment.bam'", ")", ")", "job", ".", "fileStore", ".", "readGlobalFile", "(", "bai_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignment.bam.bai'", ")", ")", "# Download input file", "download_url", "(", "job", "=", "job", ",", "url", "=", "inputs", ".", "gtf", ",", "work_dir", "=", "work_dir", ",", "name", "=", "'annotation.gtf'", ")", "download_url", "(", "job", "=", "job", ",", "url", "=", "inputs", ".", "gtf_pickle", ",", "work_dir", "=", "work_dir", ",", "name", "=", "'annotation.gtf.pickle'", ")", "# Call Spladder", "command", "=", "[", "'--insert_ir=y'", ",", "'--insert_es=y'", ",", "'--insert_ni=y'", ",", "'--remove_se=n'", ",", "'--validate_sg=n'", ",", "'-b'", ",", "'alignment.bam'", ",", "'-o '", ",", "'/data'", ",", "'-a'", ",", "'annotation.gtf'", ",", "'-v'", ",", "'y'", ",", "'-c'", ",", "'3'", ",", "'-M'", ",", "'single'", ",", "'-T'", ",", "'n'", ",", "'-n'", ",", "'50'", ",", "'-P'", ",", "'y'", ",", "'-p'", ",", "'n'", ",", "'--sparse_bam'", ",", "'y'", "]", "docker_call", "(", "job", "=", "job", ",", "work_dir", "=", "work_dir", ",", "parameters", "=", "command", ",", "sudo", "=", "inputs", ".", "sudo", ",", "tool", "=", "'jvivian/spladder:1.0'", ")", "# Write output to fileStore and return ids", "output_pickle", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "' '", ",", "'spladder'", ",", "'genes_graph_conf3.alignment.pickle'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_pickle", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "work_dir", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*genes_graph*'", ")", ":", "matches", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "if", "matches", ":", "output_pickle", "=", "matches", "[", "0", "]", "else", ":", "raise", "RuntimeError", "(", "\"Couldn't find genes file!\"", ")", "output_filt", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignment.filt.hdf5'", ")", "output", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'alignment.hdf5'", ")", "print", "os", ".", "listdir", "(", "work_dir", ")", "tarball_files", "(", "'spladder.tar.gz'", ",", "file_paths", "=", "[", "output_pickle", ",", "output_filt", ",", "output", "]", ",", "output_dir", "=", "work_dir", ")", "return", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'spladder.tar.gz'", ")", ")" ]
Run SplAdder to detect and quantify alternative splicing events

:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of SplAdder tarball
:rtype: str
[ "Run", "SplAdder", "to", "detect", "and", "quantify", "alternative", "splicing", "events" ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L282-L334
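The noteworthy detail in this record is the fallback that walks the work directory when SplAdder's expected output path is missing. A standalone restatement of that walk-and-match pattern is sketched below; the helper name is mine and it is not part of the repository.

# Generic version of the fallback used above: return the first file matching a
# glob-style pattern anywhere under root_dir, or raise if none is found.
import fnmatch
import os

def find_first(root_dir, pattern):
    for root, _dirs, filenames in os.walk(root_dir):
        for filename in fnmatch.filter(filenames, pattern):
            return os.path.join(root, filename)
    raise RuntimeError("Couldn't find a file matching {!r} under {}".format(pattern, root_dir))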
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
consolidate_output_tarballs
def consolidate_output_tarballs(job, inputs, vcqc_id, spladder_id):
    """
    Combine the contents of separate tarballs into one.

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str vcqc_id: FileStore ID of variant calling and QC tarball
    :param str spladder_id: FileStore ID of spladder tarball
    """
    job.fileStore.logToMaster('Consolidating files and uploading: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Retrieve IDs
    uuid = inputs.uuid
    # Unpack IDs
    # Retrieve output file paths to consolidate
    vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
    spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(work_dir, 'spladder.tar.gz'))
    # I/O
    fname = uuid + '.tar.gz' if not inputs.improper_pair else 'IMPROPER_PAIR' + uuid + '.tar.gz'
    out_tar = os.path.join(work_dir, fname)
    # Consolidate separate tarballs into one
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in [vcqc_tar, spladder_tar]:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar == vcqc_tar:
                            tarinfo.name = os.path.join(uuid, 'variants_and_qc', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(uuid, 'spladder', os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output directory
    if inputs.output_dir:
        mkdir_p(inputs.output_dir)
        shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.basename(out_tar)))
    # Upload to S3
    if inputs.output_s3_dir:
        out_id = job.fileStore.writeGlobalFile(out_tar)
        job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.output_s3_dir,
                          file_name=fname, key_path=inputs.ssec, cores=inputs.cores)
python
def consolidate_output_tarballs(job, inputs, vcqc_id, spladder_id):
    """
    Combine the contents of separate tarballs into one.

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str vcqc_id: FileStore ID of variant calling and QC tarball
    :param str spladder_id: FileStore ID of spladder tarball
    """
    job.fileStore.logToMaster('Consolidating files and uploading: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Retrieve IDs
    uuid = inputs.uuid
    # Unpack IDs
    # Retrieve output file paths to consolidate
    vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
    spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(work_dir, 'spladder.tar.gz'))
    # I/O
    fname = uuid + '.tar.gz' if not inputs.improper_pair else 'IMPROPER_PAIR' + uuid + '.tar.gz'
    out_tar = os.path.join(work_dir, fname)
    # Consolidate separate tarballs into one
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in [vcqc_tar, spladder_tar]:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar == vcqc_tar:
                            tarinfo.name = os.path.join(uuid, 'variants_and_qc', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(uuid, 'spladder', os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output directory
    if inputs.output_dir:
        mkdir_p(inputs.output_dir)
        shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.basename(out_tar)))
    # Upload to S3
    if inputs.output_s3_dir:
        out_id = job.fileStore.writeGlobalFile(out_tar)
        job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.output_s3_dir,
                          file_name=fname, key_path=inputs.ssec, cores=inputs.cores)
[ "def", "consolidate_output_tarballs", "(", "job", ",", "inputs", ",", "vcqc_id", ",", "spladder_id", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Consolidating files and uploading: {}'", ".", "format", "(", "inputs", ".", "uuid", ")", ")", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "# Retrieve IDs", "uuid", "=", "inputs", ".", "uuid", "# Unpack IDs", "# Retrieve output file paths to consolidate", "vcqc_tar", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "vcqc_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'vcqc.tar.gz'", ")", ")", "spladder_tar", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "spladder_id", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'spladder.tar.gz'", ")", ")", "# I/O", "fname", "=", "uuid", "+", "'.tar.gz'", "if", "not", "inputs", ".", "improper_pair", "else", "'IMPROPER_PAIR'", "+", "uuid", "+", "'.tar.gz'", "out_tar", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "fname", ")", "# Consolidate separate tarballs into one", "with", "tarfile", ".", "open", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "out_tar", ")", ",", "'w:gz'", ")", "as", "f_out", ":", "for", "tar", "in", "[", "vcqc_tar", ",", "spladder_tar", "]", ":", "with", "tarfile", ".", "open", "(", "tar", ",", "'r'", ")", "as", "f_in", ":", "for", "tarinfo", "in", "f_in", ":", "with", "closing", "(", "f_in", ".", "extractfile", "(", "tarinfo", ")", ")", "as", "f_in_file", ":", "if", "tar", "==", "vcqc_tar", ":", "tarinfo", ".", "name", "=", "os", ".", "path", ".", "join", "(", "uuid", ",", "'variants_and_qc'", ",", "os", ".", "path", ".", "basename", "(", "tarinfo", ".", "name", ")", ")", "else", ":", "tarinfo", ".", "name", "=", "os", ".", "path", ".", "join", "(", "uuid", ",", "'spladder'", ",", "os", ".", "path", ".", "basename", "(", "tarinfo", ".", "name", ")", ")", "f_out", ".", "addfile", "(", "tarinfo", ",", "fileobj", "=", "f_in_file", ")", "# Move to output directory", "if", "inputs", ".", "output_dir", ":", "mkdir_p", "(", "inputs", ".", "output_dir", ")", "shutil", ".", "copy", "(", "out_tar", ",", "os", ".", "path", ".", "join", "(", "inputs", ".", "output_dir", ",", "os", ".", "path", ".", "basename", "(", "out_tar", ")", ")", ")", "# Upload to S3", "if", "inputs", ".", "output_s3_dir", ":", "out_id", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "out_tar", ")", "job", ".", "addChildJobFn", "(", "s3am_upload_job", ",", "file_id", "=", "out_id", ",", "s3_dir", "=", "inputs", ".", "output_s3_dir", ",", "file_name", "=", "fname", ",", "key_path", "=", "inputs", ".", "ssec", ",", "cores", "=", "inputs", ".", "cores", ")" ]
Combine the contents of separate tarballs into one.

:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str vcqc_id: FileStore ID of variant calling and QC tarball
:param str spladder_id: FileStore ID of spladder tarball
[ "Combine", "the", "contents", "of", "separate", "tarballs", "into", "one", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L337-L376
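The tarball-merging loop above is a reusable pattern: stream members out of several gzipped tars and re-root them under per-source directories in a single output archive. A self-contained sketch follows; the function name and the explicit skip of non-file members are my additions, not part of the repository.

# Minimal restatement of the consolidation pattern (assumed helper, not from the repo).
import os
import tarfile
from contextlib import closing

def merge_tarballs(out_path, named_inputs):
    # named_inputs: iterable of (prefix_dir, tar_path) pairs
    with tarfile.open(out_path, 'w:gz') as f_out:
        for prefix, tar_path in named_inputs:
            with tarfile.open(tar_path, 'r') as f_in:
                for tarinfo in f_in:
                    if not tarinfo.isfile():
                        continue  # skip directories; extractfile() returns None for them
                    with closing(f_in.extractfile(tarinfo)) as member:
                        tarinfo.name = os.path.join(prefix, os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=member)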
BD2KGenomics/toil-scripts
src/toil_scripts/spladder_pipeline/spladder_pipeline.py
main
def main():
    """
    This Toil pipeline aligns reads and performs alternative splicing analysis.

    Please read the README.md located in the same directory for run instructions.
    """
    # Define Parser object and add to toil
    url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config', required=True,
                        help='Path to configuration file for samples, one per line. UUID,URL_to_bamfile. '
                             'The URL may be a standard "http://", a "file://<abs_path>", or "s3://<bucket>/<key>"')
    parser.add_argument('--gtf', help='URL to annotation GTF file',
                        default=url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')
    parser.add_argument('--gtf-pickle', help='Pickled GTF file',
                        default=url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')
    parser.add_argument('--gtf-m53', help='M53 preprocessing annotation table',
                        default=url_prefix + 'spladder/gencode.v23.annotation.gtf.m53')
    parser.add_argument('--positions', help='URL to SNP positions over genes file (TSV)',
                        default=url_prefix + 'spladder/positions_fixed.tsv')
    parser.add_argument('--genome', help='URL to Genome fasta',
                        default=url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')
    parser.add_argument('--genome-index', help='Index file (fai) of genome',
                        default=url_prefix + 'spladder/hg38_no_alt.fa.fai')
    parser.add_argument('--ssec', default=None, help='Path to master key used for downloading encrypted files.')
    parser.add_argument('--output-s3-dir', default=None, help='S3 Directory of the form: s3://bucket/directory')
    parser.add_argument('--output-dir', default=None, help='full path where final results will be output')
    parser.add_argument('--sudo', action='store_true', default=False,
                        help='Set flag if sudo is required to run Docker.')
    parser.add_argument('--star-index', help='URL to download STAR Index built from HG38/gencodev23 annotation.',
                        default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')
    parser.add_argument('--fwd-3pr-adapter', help="Sequence for the FWD 3' Read Adapter.", default='AGATCGGAAGAG')
    parser.add_argument('--rev-3pr-adapter', help="Sequence for the REV 3' Read Adapter.", default='AGATCGGAAGAG')
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Sanity Checks
    if args.config:
        assert os.path.isfile(args.config), 'Config not found at: {}'.format(args.config)
    if args.ssec:
        assert os.path.isfile(args.ssec), 'Encryption key not found at: {}'.format(args.config)
    if args.output_s3_dir:
        assert args.output_s3_dir.startswith('s3://'), 'Wrong format for output s3 directory'
    # Program checks
    for program in ['curl', 'docker']:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)
    Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args)
python
def main():
    """
    This Toil pipeline aligns reads and performs alternative splicing analysis.

    Please read the README.md located in the same directory for run instructions.
    """
    # Define Parser object and add to toil
    url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config', required=True,
                        help='Path to configuration file for samples, one per line. UUID,URL_to_bamfile. '
                             'The URL may be a standard "http://", a "file://<abs_path>", or "s3://<bucket>/<key>"')
    parser.add_argument('--gtf', help='URL to annotation GTF file',
                        default=url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')
    parser.add_argument('--gtf-pickle', help='Pickled GTF file',
                        default=url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')
    parser.add_argument('--gtf-m53', help='M53 preprocessing annotation table',
                        default=url_prefix + 'spladder/gencode.v23.annotation.gtf.m53')
    parser.add_argument('--positions', help='URL to SNP positions over genes file (TSV)',
                        default=url_prefix + 'spladder/positions_fixed.tsv')
    parser.add_argument('--genome', help='URL to Genome fasta',
                        default=url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')
    parser.add_argument('--genome-index', help='Index file (fai) of genome',
                        default=url_prefix + 'spladder/hg38_no_alt.fa.fai')
    parser.add_argument('--ssec', default=None, help='Path to master key used for downloading encrypted files.')
    parser.add_argument('--output-s3-dir', default=None, help='S3 Directory of the form: s3://bucket/directory')
    parser.add_argument('--output-dir', default=None, help='full path where final results will be output')
    parser.add_argument('--sudo', action='store_true', default=False,
                        help='Set flag if sudo is required to run Docker.')
    parser.add_argument('--star-index', help='URL to download STAR Index built from HG38/gencodev23 annotation.',
                        default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')
    parser.add_argument('--fwd-3pr-adapter', help="Sequence for the FWD 3' Read Adapter.", default='AGATCGGAAGAG')
    parser.add_argument('--rev-3pr-adapter', help="Sequence for the REV 3' Read Adapter.", default='AGATCGGAAGAG')
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Sanity Checks
    if args.config:
        assert os.path.isfile(args.config), 'Config not found at: {}'.format(args.config)
    if args.ssec:
        assert os.path.isfile(args.ssec), 'Encryption key not found at: {}'.format(args.config)
    if args.output_s3_dir:
        assert args.output_s3_dir.startswith('s3://'), 'Wrong format for output s3 directory'
    # Program checks
    for program in ['curl', 'docker']:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)
    Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to toil", "url_prefix", "=", "'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "main", ".", "__doc__", ",", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ")", "parser", ".", "add_argument", "(", "'--config'", ",", "required", "=", "True", ",", "help", "=", "'Path to configuration file for samples, one per line. UUID,URL_to_bamfile. '", "'The URL may be a standard \"http://\", a \"file://<abs_path>\", or \"s3://<bucket>/<key>\"'", ")", "parser", ".", "add_argument", "(", "'--gtf'", ",", "help", "=", "'URL to annotation GTF file'", ",", "default", "=", "url_prefix", "+", "'rnaseq_cgl/gencode.v23.annotation.gtf'", ")", "parser", ".", "add_argument", "(", "'--gtf-pickle'", ",", "help", "=", "'Pickled GTF file'", ",", "default", "=", "url_prefix", "+", "'spladder/gencode.v23.annotation.gtf.pickle'", ")", "parser", ".", "add_argument", "(", "'--gtf-m53'", ",", "help", "=", "'M53 preprocessing annotation table'", ",", "default", "=", "url_prefix", "+", "'spladder/gencode.v23.annotation.gtf.m53'", ")", "parser", ".", "add_argument", "(", "'--positions'", ",", "help", "=", "'URL to SNP positions over genes file (TSV)'", ",", "default", "=", "url_prefix", "+", "'spladder/positions_fixed.tsv'", ")", "parser", ".", "add_argument", "(", "'--genome'", ",", "help", "=", "'URL to Genome fasta'", ",", "default", "=", "url_prefix", "+", "'rnaseq_cgl/hg38_no_alt.fa'", ")", "parser", ".", "add_argument", "(", "'--genome-index'", ",", "help", "=", "'Index file (fai) of genome'", ",", "default", "=", "url_prefix", "+", "'spladder/hg38_no_alt.fa.fai'", ")", "parser", ".", "add_argument", "(", "'--ssec'", ",", "default", "=", "None", ",", "help", "=", "'Path to master key used for downloading encrypted files.'", ")", "parser", ".", "add_argument", "(", "'--output-s3-dir'", ",", "default", "=", "None", ",", "help", "=", "'S3 Directory of the form: s3://bucket/directory'", ")", "parser", ".", "add_argument", "(", "'--output-dir'", ",", "default", "=", "None", ",", "help", "=", "'full path where final results will be output'", ")", "parser", ".", "add_argument", "(", "'--sudo'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Set flag if sudo is required to run Docker.'", ")", "parser", ".", "add_argument", "(", "'--star-index'", ",", "help", "=", "'URL to download STAR Index built from HG38/gencodev23 annotation.'", ",", "default", "=", "url_prefix", "+", "'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz'", ")", "parser", ".", "add_argument", "(", "'--fwd-3pr-adapter'", ",", "help", "=", "\"Sequence for the FWD 3' Read Adapter.\"", ",", "default", "=", "'AGATCGGAAGAG'", ")", "parser", ".", "add_argument", "(", "'--rev-3pr-adapter'", ",", "help", "=", "\"Sequence for the REV 3' Read Adapter.\"", ",", "default", "=", "'AGATCGGAAGAG'", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Sanity Checks", "if", "args", ".", "config", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "config", ")", ",", "'Config not found at: {}'", ".", "format", "(", "args", ".", "config", ")", "if", "args", ".", "ssec", ":", "assert", "os", ".", "path", ".", "isfile", "(", "args", ".", "ssec", ")", ",", "'Encryption key not found at: {}'", ".", "format", "(", "args", ".", "config", ")", "if", "args", ".", "output_s3_dir", ":", "assert", "args", ".", 
"output_s3_dir", ".", "startswith", "(", "'s3://'", ")", ",", "'Wrong format for output s3 directory'", "# Program checks", "for", "program", "in", "[", "'curl'", ",", "'docker'", "]", ":", "assert", "which", "(", "program", ")", ",", "'Program \"{}\" must be installed on every node.'", ".", "format", "(", "program", ")", "Job", ".", "Runner", ".", "startToil", "(", "Job", ".", "wrapJobFn", "(", "parse_input_samples", ",", "args", ")", ",", "args", ")" ]
This Toil pipeline aligns reads and performs alternative splicing analysis.

Please read the README.md located in the same directory for run instructions.
[ "This", "Toil", "pipeline", "aligns", "reads", "and", "performs", "alternative", "splicing", "analysis", "." ]
train
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/spladder_pipeline/spladder_pipeline.py#L379-L425
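To make the --config contract concrete: per the help string above, each line holds a sample UUID and a URL separated by a comma. The sample UUIDs, bucket names, local paths, and the launch command in the comment below are made up for illustration; the jobStore positional argument comes from the Toil options added by Job.Runner.addToilOptions.

# Hypothetical example of writing a config file accepted by --config.
with open('samples.config', 'w') as f:
    f.write('sample-uuid-1,s3://my-bucket/sample1.bam\n')   # made-up bucket/key
    f.write('sample-uuid-2,file:///data/sample2.bam\n')     # made-up local path

# Illustrative launch (entry-point name and jobStore path assumed):
# python spladder_pipeline.py ./jobStore --config samples.config --output-dir /results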
bd808/python-iptools
iptools/ipv4.py
validate_ip
def validate_ip(s):
    """Validate a dotted-quad ip address.

    The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    if _DOTTED_QUAD_RE.match(s):
        quads = s.split('.')
        for q in quads:
            if int(q) > 255:
                return False
        return True
    return False
python
def validate_ip(s):
    """Validate a dotted-quad ip address.

    The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    if _DOTTED_QUAD_RE.match(s):
        quads = s.split('.')
        for q in quads:
            if int(q) > 255:
                return False
        return True
    return False
[ "def", "validate_ip", "(", "s", ")", ":", "if", "_DOTTED_QUAD_RE", ".", "match", "(", "s", ")", ":", "quads", "=", "s", ".", "split", "(", "'.'", ")", "for", "q", "in", "quads", ":", "if", "int", "(", "q", ")", ">", "255", ":", "return", "False", "return", "True", "return", "False" ]
Validate a dotted-quad ip address.

The string is considered a valid dotted-quad address if it consists of
one to four octets (0-255) separated by periods (.).

>>> validate_ip('127.0.0.1')
True
>>> validate_ip('127.0')
True
>>> validate_ip('127.0.0.256')
False
>>> validate_ip(LOCALHOST)
True
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
    ...
TypeError: expected string or buffer

:param s: String to validate as a dotted-quad ip address.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
:raises: TypeError
[ "Validate", "a", "dotted", "-", "quad", "ip", "address", "." ]
train
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L190-L222
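Note from the doctests above that validate_ip deliberately accepts partial addresses such as '127.0'. Callers that need a strict four-octet check can layer one on top; the wrapper below is my own sketch, not part of iptools.

# Hypothetical strict variant: require exactly four octets in addition to validate_ip's checks.
def validate_full_ip(s):
    return validate_ip(s) and len(s.split('.')) == 4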