Dataset columns (name, dtype, length / value range):

  id                int32    0 to 252k
  repo              string   lengths 7 to 55
  path              string   lengths 4 to 127
  func_name         string   lengths 1 to 88
  original_string   string   lengths 75 to 19.8k
  language          string   1 distinct class
  code              string   lengths 75 to 19.8k
  code_tokens       list
  docstring         string   lengths 3 to 17.3k
  docstring_tokens  list
  sha               string   lengths 40 to 40
  url               string   lengths 87 to 242
223,000
kieferk/dfply
dfply/reshape.py
unite
python
def unite(df, colname, *args, **kwargs):
    """
    Does the inverse of `separate`, joining columns together by a specified
    separator. Any columns that are not strings will be converted to strings.

    Args:
        df (pandas.DataFrame): DataFrame passed in through the pipe.
        colname (str): the name of the new joined column.
        *args: list of columns to be joined, which can be strings, symbolic,
            or integer positions.

    Kwargs:
        sep (str): the string separator to join the columns with.
        remove (bool): Boolean indicating whether or not to remove the
            original columns.
        na_action (str): can be one of `'maintain'` (the default), '`ignore'`,
            or `'as_string'`. The default will make the new column row a `NaN`
            value if any of the original column cells at that row contained
            `NaN`. '`ignore'` will treat any `NaN` value as an empty string
            during joining. `'as_string'` will convert any `NaN` value to the
            string `'nan'` prior to joining.
    """
    to_unite = list([a for a in flatten(args)])
    sep = kwargs.get('sep', '_')
    remove = kwargs.get('remove', True)
    # possible na_action values
    # ignore: empty string
    # maintain: keep as np.nan (default)
    # as_string: becomes string 'nan'
    na_action = kwargs.get('na_action', 'maintain')
    # print(to_unite, sep, remove, na_action)

    if na_action == 'maintain':
        df[colname] = df[to_unite].apply(lambda x: np.nan if any(x.isnull()) else sep.join(x.map(str)), axis=1)
    elif na_action == 'ignore':
        df[colname] = df[to_unite].apply(lambda x: sep.join(x[~x.isnull()].map(str)), axis=1)
    elif na_action == 'as_string':
        df[colname] = df[to_unite].astype(str).apply(lambda x: sep.join(x), axis=1)

    if remove:
        df.drop(to_unite, axis=1, inplace=True)

    return df
Does the inverse of `separate`, joining columns together by a specified separator. Any columns that are not strings will be converted to strings. Args: df (pandas.DataFrame): DataFrame passed in through the pipe. colname (str): the name of the new joined column. *args: list of columns to be joined, which can be strings, symbolic, or integer positions. Kwargs: sep (str): the string separator to join the columns with. remove (bool): Boolean indicating whether or not to remove the original columns. na_action (str): can be one of `'maintain'` (the default), '`ignore'`, or `'as_string'`. The default will make the new column row a `NaN` value if any of the original column cells at that row contained `NaN`. '`ignore'` will treat any `NaN` value as an empty string during joining. `'as_string'` will convert any `NaN` value to the string `'nan'` prior to joining.
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L281-L329
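A minimal usage sketch for the `unite` function above, assuming pandas and numpy are imported and dfply's `flatten` helper is in scope; the DataFrame and column names are made up for illustration:

import numpy as np
import pandas as pd

df = pd.DataFrame({'first': ['ann', 'bob'], 'last': ['lee', np.nan]})
# Default na_action='maintain': a row containing any NaN stays NaN in the new
# column, and remove=True drops the original columns.
out = unite(df.copy(), 'full_name', ['first', 'last'], sep='_')
# Expected under these assumptions: out['full_name'] -> ['ann_lee', NaN]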
223,001
kieferk/dfply
dfply/set_ops.py
validate_set_ops
python
def validate_set_ops(df, other):
    """
    Helper function to ensure that DataFrames are valid for set operations.
    Columns must be the same name in the same order, and indices must be of the
    same dimension with the same names.
    """
    if df.columns.values.tolist() != other.columns.values.tolist():
        not_in_df = [col for col in other.columns if col not in df.columns]
        not_in_other = [col for col in df.columns if col not in other.columns]
        error_string = 'Error: not compatible.'
        if len(not_in_df):
            error_string += ' Cols in y but not x: ' + str(not_in_df) + '.'
        if len(not_in_other):
            error_string += ' Cols in x but not y: ' + str(not_in_other) + '.'
        raise ValueError(error_string)
    if len(df.index.names) != len(other.index.names):
        raise ValueError('Index dimension mismatch')
    if df.index.names != other.index.names:
        raise ValueError('Index mismatch')
    else:
        return
Helper function to ensure that DataFrames are valid for set operations. Columns must be the same name in the same order, and indices must be of the same dimension with the same names.
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/set_ops.py#L6-L27
223,002
kieferk/dfply
dfply/set_ops.py
union
python
def union(df, other, index=False, keep='first'):
    """
    Returns rows that appear in either DataFrame.

    Args:
        df (pandas.DataFrame): data passed in through the pipe.
        other (pandas.DataFrame): other DataFrame to use for set operation with
            the first.

    Kwargs:
        index (bool): Boolean indicating whether to consider the pandas index
            as part of the set operation (default `False`).
        keep (str): Indicates which duplicate should be kept. Options are
            `'first'` and `'last'`.
    """
    validate_set_ops(df, other)
    stacked = df.append(other)
    if index:
        stacked_reset_indexes = stacked.reset_index()
        index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
        index_name = df.index.names
        return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
        return_df.index.names = index_name
        return return_df
    else:
        return stacked.drop_duplicates(keep=keep)
Returns rows that appear in either DataFrame. Args: df (pandas.DataFrame): data passed in through the pipe. other (pandas.DataFrame): other DataFrame to use for set operation with the first. Kwargs: index (bool): Boolean indicating whether to consider the pandas index as part of the set operation (default `False`). keep (str): Indicates which duplicate should be kept. Options are `'first'` and `'last'`.
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/set_ops.py#L35-L60
223,003
kieferk/dfply
dfply/set_ops.py
intersect
python
def intersect(df, other, index=False, keep='first'):
    """
    Returns rows that appear in both DataFrames.

    Args:
        df (pandas.DataFrame): data passed in through the pipe.
        other (pandas.DataFrame): other DataFrame to use for set operation with
            the first.

    Kwargs:
        index (bool): Boolean indicating whether to consider the pandas index
            as part of the set operation (default `False`).
        keep (str): Indicates which duplicate should be kept. Options are
            `'first'` and `'last'`.
    """
    validate_set_ops(df, other)
    if index:
        df_reset_index = df.reset_index()
        other_reset_index = other.reset_index()
        index_cols = [col for col in df_reset_index.columns if col not in df.columns]
        df_index_names = df.index.names
        return_df = (pd.merge(df_reset_index, other_reset_index,
                              how='inner',
                              left_on=df_reset_index.columns.values.tolist(),
                              right_on=df_reset_index.columns.values.tolist())
                     .set_index(index_cols))
        return_df.index.names = df_index_names
        return_df = return_df.drop_duplicates(keep=keep)
        return return_df
    else:
        return_df = pd.merge(df, other,
                             how='inner',
                             left_on=df.columns.values.tolist(),
                             right_on=df.columns.values.tolist())
        return_df = return_df.drop_duplicates(keep=keep)
        return return_df
Returns rows that appear in both DataFrames. Args: df (pandas.DataFrame): data passed in through the pipe. other (pandas.DataFrame): other DataFrame to use for set operation with the first. Kwargs: index (bool): Boolean indicating whether to consider the pandas index as part of the set operation (default `False`). keep (str): Indicates which duplicate should be kept. Options are `'first'` and `'last'`.
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/set_ops.py#L69-L105
223,004
kieferk/dfply
dfply/transform.py
transmute
python
def transmute(df, *keep_columns, **kwargs):
    """
    Creates columns and then returns those new columns and optionally specified
    original columns from the DataFrame.

    This works like `mutate`, but designed to discard the original columns used
    to create the new ones.

    Args:
        *keep_columns: Column labels to keep. Can be string, symbolic, or
            integer position.

    Kwargs:
        **kwargs: keys are the names of the new columns, values indicate what
            the new column values will be.

    Example:
        diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)

            y_div_z  x_plus_y
        0  1.637860      7.93
        1  1.662338      7.73
        2  1.761905      8.12
    """
    keep_cols = []
    for col in flatten(keep_columns):
        try:
            keep_cols.append(col.name)
        except:
            if isinstance(col, str):
                keep_cols.append(col)
            elif isinstance(col, int):
                keep_cols.append(df.columns[col])
    df = df.assign(**kwargs)
    columns = [k for k in kwargs.keys()] + list(keep_cols)
    return df[columns]
Creates columns and then returns those new columns and optionally specified original columns from the DataFrame. This works like `mutate`, but designed to discard the original columns used to create the new ones. Args: *keep_columns: Column labels to keep. Can be string, symbolic, or integer position. Kwargs: **kwargs: keys are the names of the new columns, values indicate what the new column values will be. Example: diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3) y_div_z x_plus_y 0 1.637860 7.93 1 1.662338 7.73 2 1.761905 8.12
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/transform.py#L64-L101
223,005
kieferk/dfply
dfply/vector.py
coalesce
python
def coalesce(*series):
    """
    Takes the first non-NaN value in order across the specified series,
    returning a new series. Mimics the coalesce function in dplyr and SQL.

    Args:
        *series: Series objects, typically represented in their symbolic form
            (like X.series).

    Example:
        df = pd.DataFrame({
            'a':[1,np.nan,np.nan,np.nan,np.nan],
            'b':[2,3,np.nan,np.nan,np.nan],
            'c':[np.nan,np.nan,4,5,np.nan],
            'd':[6,7,8,9,np.nan]
        })
        df >> transmute(coal=coalesce(X.a, X.b, X.c, X.d))

             coal
        0       1
        1       3
        2       4
        3       5
        4  np.nan
    """
    series = [pd.Series(s) for s in series]
    coalescer = pd.concat(series, axis=1)
    min_nonna = np.argmin(pd.isnull(coalescer).values, axis=1)
    min_nonna = [coalescer.columns[i] for i in min_nonna]
    return coalescer.lookup(np.arange(coalescer.shape[0]), min_nonna)
Takes the first non-NaN value in order across the specified series, returning a new series. Mimics the coalesce function in dplyr and SQL. Args: *series: Series objects, typically represented in their symbolic form (like X.series). Example: df = pd.DataFrame({ 'a':[1,np.nan,np.nan,np.nan,np.nan], 'b':[2,3,np.nan,np.nan,np.nan], 'c':[np.nan,np.nan,4,5,np.nan], 'd':[6,7,8,9,np.nan] }) df >> transmute(coal=coalesce(X.a, X.b, X.c, X.d)) coal 0 1 1 3 2 4 3 5 4 np.nan
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L80-L110
223,006
kieferk/dfply
dfply/vector.py
case_when
python
def case_when(*conditions):
    """
    Functions as a switch statement, creating a new series out of logical
    conditions specified by 2-item lists where the left-hand item is the
    logical condition and the right-hand item is the value where that
    condition is true.

    Conditions should go from the most specific to the most general. A
    conditional that appears earlier in the series will "overwrite" one that
    appears later. Think of it like a series of if-else statements.

    The logicals and values of the condition pairs must be all the same length,
    or length 1. Logicals can be vectors of booleans or a single boolean
    (`True`, for example, can be the logical statement for the final
    conditional to catch all remaining.).

    Args:
        *conditions: Each condition should be a list with two values. The first
            value is a boolean or vector of booleans that specify indices in
            which the condition is met. The second value is a vector of values
            or single value specifying the outcome where that condition is met.

    Example:
        df = pd.DataFrame({
            'num':np.arange(16)
        })
        df >> mutate(strnum=case_when([X.num % 15 == 0, 'fizzbuzz'],
                                      [X.num % 3 == 0, 'fizz'],
                                      [X.num % 5 == 0, 'buzz'],
                                      [True, X.num.astype(str)]))

            num    strnum
        0     0  fizzbuzz
        1     1         1
        2     2         2
        3     3      fizz
        4     4         4
        5     5      buzz
        6     6      fizz
        7     7         7
        8     8         8
        9     9      fizz
        10   10      buzz
        11   11        11
        12   12      fizz
        13   13        13
        14   14        14
        15   15  fizzbuzz
    """
    lengths = []
    for logical, outcome in conditions:
        if isinstance(logical, collections.Iterable):
            lengths.append(len(logical))
        if isinstance(outcome, collections.Iterable) and not isinstance(outcome, str):
            lengths.append(len(outcome))

    unique_lengths = np.unique(lengths)
    assert len(unique_lengths) == 1
    output_len = unique_lengths[0]

    output = []
    for logical, outcome in conditions:
        if isinstance(logical, bool):
            logical = np.repeat(logical, output_len)
        if isinstance(logical, pd.Series):
            logical = logical.values
        if not isinstance(outcome, collections.Iterable) or isinstance(outcome, str):
            outcome = pd.Series(np.repeat(outcome, output_len))
        outcome[~logical] = np.nan
        output.append(outcome)

    return coalesce(*output)
Functions as a switch statement, creating a new series out of logical conditions specified by 2-item lists where the left-hand item is the logical condition and the right-hand item is the value where that condition is true. Conditions should go from the most specific to the most general. A conditional that appears earlier in the series will "overwrite" one that appears later. Think of it like a series of if-else statements. The logicals and values of the condition pairs must be all the same length, or length 1. Logicals can be vectors of booleans or a single boolean (`True`, for example, can be the logical statement for the final conditional to catch all remaining.). Args: *conditions: Each condition should be a list with two values. The first value is a boolean or vector of booleans that specify indices in which the condition is met. The second value is a vector of values or single value specifying the outcome where that condition is met. Example: df = pd.DataFrame({ 'num':np.arange(16) }) df >> mutate(strnum=case_when([X.num % 15 == 0, 'fizzbuzz'], [X.num % 3 == 0, 'fizz'], [X.num % 5 == 0, 'buzz'], [True, X.num.astype(str)])) num strnum 0 0 fizzbuzz 1 1 1 2 2 2 3 3 fizz 4 4 4 5 5 buzz 6 6 fizz 7 7 7 8 8 8 9 9 fizz 10 10 buzz 11 11 11 12 12 fizz 13 13 13 14 14 14 15 15 fizzbuzz
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L118-L189
223,007
kieferk/dfply
dfply/vector.py
if_else
python
def if_else(condition, when_true, otherwise):
    """
    Wraps creation of a series based on if-else conditional logic into a
    function call.

    Provide a boolean vector condition, value(s) when true, and value(s) when
    false, and a vector will be returned the same length as the conditional
    vector according to the logical statement.

    Args:
        condition: A boolean vector representing the condition. This is often
            a logical statement with a symbolic series.
        when_true: A vector the same length as the condition vector or a single
            value to apply when the condition is `True`.
        otherwise: A vector the same length as the condition vector or a single
            value to apply when the condition is `False`.

    Example:
        df = pd.DataFrame
    """
    if not isinstance(when_true, collections.Iterable) or isinstance(when_true, str):
        when_true = np.repeat(when_true, len(condition))
    if not isinstance(otherwise, collections.Iterable) or isinstance(otherwise, str):
        otherwise = np.repeat(otherwise, len(condition))

    assert (len(condition) == len(when_true)) and (len(condition) == len(otherwise))

    if isinstance(when_true, pd.Series):
        when_true = when_true.values
    if isinstance(otherwise, pd.Series):
        otherwise = otherwise.values

    output = np.array([when_true[i] if c else otherwise[i]
                       for i, c in enumerate(condition)])
    return output
Wraps creation of a series based on if-else conditional logic into a function call. Provide a boolean vector condition, value(s) when true, and value(s) when false, and a vector will be returned the same length as the conditional vector according to the logical statement. Args: condition: A boolean vector representing the condition. This is often a logical statement with a symbolic series. when_true: A vector the same length as the condition vector or a single value to apply when the condition is `True`. otherwise: A vector the same length as the condition vector or a single value to apply when the condition is `False`. Example: df = pd.DataFrame
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L197-L231
223,008
kieferk/dfply
dfply/vector.py
na_if
python
def na_if(series, *values):
    """
    If values in a series match a specified value, change them to `np.nan`.

    Args:
        series: Series or vector, often symbolic.
        *values: Value(s) to convert to `np.nan` in the series.
    """
    series = pd.Series(series)
    series[series.isin(values)] = np.nan
    return series
If values in a series match a specified value, change them to `np.nan`. Args: series: Series or vector, often symbolic. *values: Value(s) to convert to `np.nan` in the series.
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L239-L250
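A quick, hedged example of `na_if` above, assuming pandas and numpy are imported as in dfply; the sentinel value is made up:

import pandas as pd

s = pd.Series([1, -999, 3, -999])
print(na_if(s, -999))
# the -999 sentinels become NaN: roughly [1.0, NaN, 3.0, NaN]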
223,009
kieferk/dfply
dfply/window_functions.py
between
python
def between(series, a, b, inclusive=False):
    """
    Returns a boolean series specifying whether rows of the input series are
    between values `a` and `b`.

    Args:
        series: column to compare, typically symbolic.
        a: value series must be greater than (or equal to if `inclusive=True`)
            for the output series to be `True` at that position.
        b: value series must be less than (or equal to if `inclusive=True`) for
            the output series to be `True` at that position.

    Kwargs:
        inclusive (bool): If `True`, comparison is done with `>=` and `<=`.
            If `False` (the default), comparison uses `>` and `<`.
    """
    if inclusive == True:
        met_condition = (series >= a) & (series <= b)
    elif inclusive == False:
        met_condition = (series > a) & (series < b)
    return met_condition
Returns a boolean series specifying whether rows of the input series are between values `a` and `b`. Args: series: column to compare, typically symbolic. a: value series must be greater than (or equal to if `inclusive=True`) for the output series to be `True` at that position. b: value series must be less than (or equal to if `inclusive=True`) for the output series to be `True` at that position. Kwargs: inclusive (bool): If `True`, comparison is done with `>=` and `<=`. If `False` (the default), comparison uses `>` and `<`.
6a858f066602735a90f8b6b85106bc39ceadc282
https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/window_functions.py#L43-L64
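A small sketch exercising `between` above (pandas assumed imported; the values are illustrative):

import pandas as pd

s = pd.Series([1, 3, 5, 7])
print(between(s, 2, 6))                   # exclusive by default: [False, True, True, False]
print(between(s, 3, 7, inclusive=True))   # [False, True, True, True]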
223,010
euske/pdfminer
pdfminer/psparser.py
PSBaseParser.seek
python
def seek(self, pos):
    """Seeks the parser to the given position.
    """
    if self.debug:
        logging.debug('seek: %r' % pos)
    self.fp.seek(pos)
    # reset the status for nextline()
    self.bufpos = pos
    self.buf = b''
    self.charpos = 0
    # reset the status for nexttoken()
    self._parse1 = self._parse_main
    self._curtoken = b''
    self._curtokenpos = 0
    self._tokens = []
    return
Seeks the parser to the given position.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L191-L206
223,011
euske/pdfminer
pdfminer/psparser.py
PSBaseParser.nextline
python
def nextline(self):
    """Fetches a next line that ends either with \\r or \\n.
    """
    linebuf = b''
    linepos = self.bufpos + self.charpos
    eol = False
    while 1:
        self.fillbuf()
        if eol:
            c = self.buf[self.charpos]
            # handle b'\r\n'
            if c == b'\n':
                linebuf += c
                self.charpos += 1
            break
        m = EOL.search(self.buf, self.charpos)
        if m:
            linebuf += self.buf[self.charpos:m.end(0)]
            self.charpos = m.end(0)
            if linebuf[-1] == b'\r':
                eol = True
            else:
                break
        else:
            linebuf += self.buf[self.charpos:]
            self.charpos = len(self.buf)
    if self.debug:
        logging.debug('nextline: %r, %r' % (linepos, linebuf))
    return (linepos, linebuf)
Fetches a next line that ends either with \\r or \\n.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L219-L247
223,012
euske/pdfminer
pdfminer/psparser.py
PSBaseParser.revreadlines
python
def revreadlines(self):
    """Fetches a next line backward.

    This is used to locate the trailers at the end of a file.
    """
    self.fp.seek(0, 2)
    pos = self.fp.tell()
    buf = b''
    while 0 < pos:
        prevpos = pos
        pos = max(0, pos-self.BUFSIZ)
        self.fp.seek(pos)
        s = self.fp.read(prevpos-pos)
        if not s: break
        while 1:
            n = max(s.rfind(b'\r'), s.rfind(b'\n'))
            if n == -1:
                buf = s + buf
                break
            yield s[n:]+buf
            s = s[:n]
            buf = b''
    return
Fetches a next line backward. This is used to locate the trailers at the end of a file.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L249-L272
223,013
euske/pdfminer
pdfminer/psparser.py
PSStackParser.nextobject
python
def nextobject(self):
    """Yields a list of objects.

    Returns keywords, literals, strings, numbers, arrays and dictionaries.
    Arrays and dictionaries are represented as Python lists and dictionaries.
    """
    while not self.results:
        (pos, token) = self.nexttoken()
        #print (pos,token), (self.curtype, self.curstack)
        if isinstance(token, (int, long, float, bool, str, PSLiteral)):
            # normal token
            self.push((pos, token))
        elif token == KEYWORD_ARRAY_BEGIN:
            # begin array
            self.start_type(pos, 'a')
        elif token == KEYWORD_ARRAY_END:
            # end array
            try:
                self.push(self.end_type('a'))
            except PSTypeError:
                if STRICT:
                    raise
        elif token == KEYWORD_DICT_BEGIN:
            # begin dictionary
            self.start_type(pos, 'd')
        elif token == KEYWORD_DICT_END:
            # end dictionary
            try:
                (pos, objs) = self.end_type('d')
                if len(objs) % 2 != 0:
                    raise PSSyntaxError('Invalid dictionary construct: %r' % (objs,))
                # construct a Python dictionary.
                d = dict((literal_name(k), v) for (k, v) in choplist(2, objs) if v is not None)
                self.push((pos, d))
            except PSTypeError:
                if STRICT:
                    raise
        elif token == KEYWORD_PROC_BEGIN:
            # begin proc
            self.start_type(pos, 'p')
        elif token == KEYWORD_PROC_END:
            # end proc
            try:
                self.push(self.end_type('p'))
            except PSTypeError:
                if STRICT:
                    raise
        else:
            if self.debug:
                logging.debug('do_keyword: pos=%r, token=%r, stack=%r' % \
                              (pos, token, self.curstack))
            self.do_keyword(pos, token)
        if self.context:
            continue
        else:
            self.flush()
    obj = self.results.pop(0)
    if self.debug:
        logging.debug('nextobject: %r' % (obj,))
    return obj
Yields a list of objects. Returns keywords, literals, strings, numbers, arrays and dictionaries. Arrays and dictionaries are represented as Python lists and dictionaries.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L567-L626
223,014
euske/pdfminer
pdfminer/encodingdb.py
name2unicode
python
def name2unicode(name):
    """Converts Adobe glyph names to Unicode numbers."""
    if name in glyphname2unicode:
        return glyphname2unicode[name]
    m = STRIP_NAME.search(name)
    if not m:
        raise KeyError(name)
    return unichr(int(m.group(0)))
Converts Adobe glyph names to Unicode numbers.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/encodingdb.py#L13-L20
223,015
euske/pdfminer
pdfminer/pdftypes.py
resolve1
python
def resolve1(x, default=None):
    """Resolves an object.

    If this is an array or dictionary, it may still contains
    some indirect objects inside.
    """
    while isinstance(x, PDFObjRef):
        x = x.resolve(default=default)
    return x
Resolves an object. If this is an array or dictionary, it may still contains some indirect objects inside.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdftypes.py#L73-L81
223,016
euske/pdfminer
pdfminer/pdftypes.py
resolve_all
python
def resolve_all(x, default=None):
    """Recursively resolves the given object and all the internals.

    Make sure there is no indirect reference within the nested object.
    This procedure might be slow.
    """
    while isinstance(x, PDFObjRef):
        x = x.resolve(default=default)
    if isinstance(x, list):
        x = [resolve_all(v, default=default) for v in x]
    elif isinstance(x, dict):
        for (k, v) in x.iteritems():
            x[k] = resolve_all(v, default=default)
    return x
Recursively resolves the given object and all the internals. Make sure there is no indirect reference within the nested object. This procedure might be slow.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdftypes.py#L84-L97
223,017
euske/pdfminer
pdfminer/pdftypes.py
decipher_all
python
def decipher_all(decipher, objid, genno, x):
    """Recursively deciphers the given object.
    """
    if isinstance(x, str):
        return decipher(objid, genno, x)
    if isinstance(x, list):
        x = [decipher_all(decipher, objid, genno, v) for v in x]
    elif isinstance(x, dict):
        for (k, v) in x.iteritems():
            x[k] = decipher_all(decipher, objid, genno, v)
    return x
Recursively deciphers the given object.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdftypes.py#L100-L110
223,018
euske/pdfminer
pdfminer/pdfdocument.py
PDFDocument.find_xref
python
def find_xref(self, parser):
    """Internal function used to locate the first XRef."""
    # search the last xref table by scanning the file backwards.
    prev = None
    for line in parser.revreadlines():
        line = line.strip()
        if self.debug:
            logging.debug('find_xref: %r' % line)
        if line == b'startxref':
            break
        if line:
            prev = line
    else:
        raise PDFNoValidXRef('Unexpected EOF')
    if self.debug:
        logging.info('xref found: pos=%r' % prev)
    return long(prev)
Internal function used to locate the first XRef.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdfdocument.py#L755-L771
223,019
euske/pdfminer
pdfminer/pdfdocument.py
PDFDocument.read_xref_from
python
def read_xref_from(self, parser, start, xrefs):
    """Reads XRefs from the given location."""
    parser.seek(start)
    parser.reset()
    try:
        (pos, token) = parser.nexttoken()
    except PSEOF:
        raise PDFNoValidXRef('Unexpected EOF')
    if self.debug:
        logging.info('read_xref_from: start=%d, token=%r' % (start, token))
    if isinstance(token, int):
        # XRefStream: PDF-1.5
        parser.seek(pos)
        parser.reset()
        xref = PDFXRefStream()
        xref.load(parser)
    else:
        if token is parser.KEYWORD_XREF:
            parser.nextline()
        xref = PDFXRef()
        xref.load(parser)
    xrefs.append(xref)
    trailer = xref.get_trailer()
    if self.debug:
        logging.info('trailer: %r' % trailer)
    if 'XRefStm' in trailer:
        pos = int_value(trailer['XRefStm'])
        self.read_xref_from(parser, pos, xrefs)
    if 'Prev' in trailer:
        # find previous xref
        pos = int_value(trailer['Prev'])
        self.read_xref_from(parser, pos, xrefs)
    return
Reads XRefs from the given location.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdfdocument.py#L774-L806
223,020
euske/pdfminer
pdfminer/utils.py
mult_matrix
python
def mult_matrix(m1, m0):
    (a1, b1, c1, d1, e1, f1) = m1
    (a0, b0, c0, d0, e0, f0) = m0
    """Returns the multiplication of two matrices."""
    return (a0*a1+c0*b1, b0*a1+d0*b1, a0*c1+c0*d1, b0*c1+d0*d1,
            a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
Returns the multiplication of two matrices.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L57-L63
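A worked example for `mult_matrix` above, composing two PDF-style affine matrices (a, b, c, d, e, f); the matrices are made up for illustration:

translate = (1, 0, 0, 1, 10, 20)   # translate by (10, 20)
scale = (2, 0, 0, 2, 0, 0)         # scale by 2
print(mult_matrix(translate, scale))
# -> (2, 0, 0, 2, 20, 40)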
223,021
euske/pdfminer
pdfminer/utils.py
uniq
python
def uniq(objs):
    """Eliminates duplicated elements."""
    done = set()
    for obj in objs:
        if obj in done:
            continue
        done.add(obj)
        yield obj
    return
Eliminates duplicated elements.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L95-L103
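`uniq` above is a generator that keeps only the first occurrence of each element; a minimal check:

print(list(uniq([3, 1, 3, 2, 1])))   # -> [3, 1, 2]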
223,022
euske/pdfminer
pdfminer/utils.py
csort
python
def csort(objs, key):
    """Order-preserving sorting function."""
    idxs = dict((obj, i) for (i, obj) in enumerate(objs))
    return sorted(objs, key=lambda obj: (key(obj), idxs[obj]))
Order-preserving sorting function.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L107-L110
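A small illustration of `csort` above: elements that compare equal under `key` keep their original relative order:

print(csort(['bb', 'a', 'dd', 'ccc'], len))   # -> ['a', 'bb', 'dd', 'ccc']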
223,023
euske/pdfminer
pdfminer/utils.py
fsplit
python
def fsplit(pred, objs):
    """Split a list into two classes according to the predicate."""
    t = []
    f = []
    for obj in objs:
        if pred(obj):
            t.append(obj)
        else:
            f.append(obj)
    return (t, f)
Split a list into two classes according to the predicate.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L114-L123
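A one-line check of `fsplit` above, partitioning a range by a predicate:

evens, odds = fsplit(lambda n: n % 2 == 0, range(6))
print(evens, odds)   # -> [0, 2, 4] [1, 3, 5]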
223,024
euske/pdfminer
pdfminer/utils.py
drange
python
def drange(v0, v1, d):
    """Returns a discrete range."""
    assert v0 < v1
    return xrange(int(v0)//d, int(v1+d)//d)
Returns a discrete range.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L127-L130
223,025
euske/pdfminer
pdfminer/utils.py
get_bound
python
def get_bound(pts):
    """Compute a minimal rectangle that covers all the points."""
    (x0, y0, x1, y1) = (INF, INF, -INF, -INF)
    for (x, y) in pts:
        x0 = min(x0, x)
        y0 = min(y0, y)
        x1 = max(x1, x)
        y1 = max(y1, y)
    return (x0, y0, x1, y1)
Compute a minimal rectangle that covers all the points.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L134-L142
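A quick sketch for `get_bound` above; it relies on a module-level INF constant in pdfminer.utils, so this sketch defines a stand-in before calling it:

INF = float('inf')   # stand-in for pdfminer.utils.INF, for this sketch only
print(get_bound([(0, 1), (2, -3), (1, 5)]))   # -> (0, -3, 2, 5)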
223,026
euske/pdfminer
pdfminer/utils.py
choplist
python
def choplist(n, seq):
    """Groups every n elements of the list."""
    r = []
    for x in seq:
        r.append(x)
        if len(r) == n:
            yield tuple(r)
            r = []
    return
Groups every n elements of the list.
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L157-L165
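`choplist` above yields n-element tuples and silently drops a trailing partial group; for example:

print(list(choplist(2, [1, 2, 3, 4, 5])))   # -> [(1, 2), (3, 4)]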
223,027
euske/pdfminer
pdfminer/utils.py
decode_text
python
def decode_text(s):
    """Decodes a PDFDocEncoding string to Unicode."""
    if s.startswith(b'\xfe\xff'):
        return unicode(s[2:], 'utf-16be', 'ignore')
    else:
        return ''.join(PDFDocEncoding[ord(c)] for c in s)
Decodes a PDFDocEncoding string to Unicode.
[ "Decodes", "a", "PDFDocEncoding", "string", "to", "Unicode", "." ]
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L223-L228
223,028
euske/pdfminer
pdfminer/pdfparser.py
PDFParser.do_keyword
def do_keyword(self, pos, token): """Handles PDF-related keywords.""" if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF): self.add_results(*self.pop(1)) elif token is self.KEYWORD_ENDOBJ: self.add_results(*self.pop(4)) elif token is self.KEYWORD_NULL: # null object self.push((pos, None)) elif token is self.KEYWORD_R: # reference to indirect object try: ((_, objid), (_, genno)) = self.pop(2) (objid, genno) = (int(objid), int(genno)) obj = PDFObjRef(self.doc, objid, genno) self.push((pos, obj)) except PSSyntaxError: pass elif token is self.KEYWORD_STREAM: # stream object ((_, dic),) = self.pop(1) dic = dict_value(dic) objlen = 0 if not self.fallback: try: objlen = int_value(dic['Length']) except KeyError: if STRICT: raise PDFSyntaxError('/Length is undefined: %r' % dic) self.seek(pos) try: (_, line) = self.nextline() # 'stream' except PSEOF: if STRICT: raise PDFSyntaxError('Unexpected EOF') return pos += len(line) self.fp.seek(pos) data = self.fp.read(objlen) self.seek(pos+objlen) while 1: try: (linepos, line) = self.nextline() except PSEOF: if STRICT: raise PDFSyntaxError('Unexpected EOF') break if b'endstream' in line: i = line.index(b'endstream') objlen += i if self.fallback: data += line[:i] break objlen += len(line) if self.fallback: data += line self.seek(pos+objlen) # XXX limit objlen not to exceed object boundary if self.debug: logging.debug('Stream: pos=%d, objlen=%d, dic=%r, data=%r...' % \ (pos, objlen, dic, data[:10])) obj = PDFStream(dic, data, self.doc.decipher) self.push((pos, obj)) else: # others self.push((pos, token)) return
python
def do_keyword(self, pos, token): """Handles PDF-related keywords.""" if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF): self.add_results(*self.pop(1)) elif token is self.KEYWORD_ENDOBJ: self.add_results(*self.pop(4)) elif token is self.KEYWORD_NULL: # null object self.push((pos, None)) elif token is self.KEYWORD_R: # reference to indirect object try: ((_, objid), (_, genno)) = self.pop(2) (objid, genno) = (int(objid), int(genno)) obj = PDFObjRef(self.doc, objid, genno) self.push((pos, obj)) except PSSyntaxError: pass elif token is self.KEYWORD_STREAM: # stream object ((_, dic),) = self.pop(1) dic = dict_value(dic) objlen = 0 if not self.fallback: try: objlen = int_value(dic['Length']) except KeyError: if STRICT: raise PDFSyntaxError('/Length is undefined: %r' % dic) self.seek(pos) try: (_, line) = self.nextline() # 'stream' except PSEOF: if STRICT: raise PDFSyntaxError('Unexpected EOF') return pos += len(line) self.fp.seek(pos) data = self.fp.read(objlen) self.seek(pos+objlen) while 1: try: (linepos, line) = self.nextline() except PSEOF: if STRICT: raise PDFSyntaxError('Unexpected EOF') break if b'endstream' in line: i = line.index(b'endstream') objlen += i if self.fallback: data += line[:i] break objlen += len(line) if self.fallback: data += line self.seek(pos+objlen) # XXX limit objlen not to exceed object boundary if self.debug: logging.debug('Stream: pos=%d, objlen=%d, dic=%r, data=%r...' % \ (pos, objlen, dic, data[:10])) obj = PDFStream(dic, data, self.doc.decipher) self.push((pos, obj)) else: # others self.push((pos, token)) return
[ "def", "do_keyword", "(", "self", ",", "pos", ",", "token", ")", ":", "if", "token", "in", "(", "self", ".", "KEYWORD_XREF", ",", "self", ".", "KEYWORD_STARTXREF", ")", ":", "self", ".", "add_results", "(", "*", "self", ".", "pop", "(", "1", ")", "...
Handles PDF-related keywords.
[ "Handles", "PDF", "-", "related", "keywords", "." ]
8150458718e9024c80b00e74965510b20206e588
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdfparser.py#L60-L133
223,029
eliangcs/http-prompt
http_prompt/execution.py
generate_help_text
def generate_help_text(): """Return a formatted string listing commands, HTTPie options, and HTTP actions. """ def generate_cmds_with_explanations(summary, cmds): text = '{0}:\n'.format(summary) for cmd, explanation in cmds: text += '\t{0:<10}\t{1:<20}\n'.format(cmd, explanation) return text + '\n' text = generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items()) text += generate_cmds_with_explanations('Options', OPTION_NAMES.items()) text += generate_cmds_with_explanations('Actions', ACTIONS.items()) text += generate_cmds_with_explanations('Headers', HEADER_NAMES.items()) return text
python
def generate_help_text(): """Return a formatted string listing commands, HTTPie options, and HTTP actions. """ def generate_cmds_with_explanations(summary, cmds): text = '{0}:\n'.format(summary) for cmd, explanation in cmds: text += '\t{0:<10}\t{1:<20}\n'.format(cmd, explanation) return text + '\n' text = generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items()) text += generate_cmds_with_explanations('Options', OPTION_NAMES.items()) text += generate_cmds_with_explanations('Actions', ACTIONS.items()) text += generate_cmds_with_explanations('Headers', HEADER_NAMES.items()) return text
[ "def", "generate_help_text", "(", ")", ":", "def", "generate_cmds_with_explanations", "(", "summary", ",", "cmds", ")", ":", "text", "=", "'{0}:\\n'", ".", "format", "(", "summary", ")", "for", "cmd", ",", "explanation", "in", "cmds", ":", "text", "+=", "'...
Return a formatted string listing commands, HTTPie options, and HTTP actions.
[ "Return", "a", "formatted", "string", "listing", "commands", "HTTPie", "options", "and", "HTTP", "actions", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/execution.py#L154-L168
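A self-contained sketch of the inner formatting helper; the ROOT_COMMANDS table here is a made-up two-entry stand-in for http-prompt's real command registry:

def generate_cmds_with_explanations(summary, cmds):
    # One titled block: a header line, then a tab-aligned row per command.
    text = '{0}:\n'.format(summary)
    for cmd, explanation in cmds:
        text += '\t{0:<10}\t{1:<20}\n'.format(cmd, explanation)
    return text + '\n'

ROOT_COMMANDS = {'cd': 'Change URL/path', 'exit': 'Exit HTTP Prompt'}  # hypothetical subset
print(generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items()))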
223,030
eliangcs/http-prompt
http_prompt/utils.py
colformat
def colformat(strings, num_sep_spaces=1, terminal_width=None): """Format a list of strings like ls does multi-column output.""" if terminal_width is None: terminal_width = get_terminal_size().columns if not strings: return num_items = len(strings) max_len = max([len(strip_ansi_escapes(s)) for s in strings]) num_columns = min( int((terminal_width + num_sep_spaces) / (max_len + num_sep_spaces)), num_items) num_columns = max(1, num_columns) num_lines = int(math.ceil(float(num_items) / num_columns)) num_columns = int(math.ceil(float(num_items) / num_lines)) num_elements_last_column = num_items % num_lines if num_elements_last_column == 0: num_elements_last_column = num_lines lines = [] for i in range(num_lines): line_size = num_columns if i >= num_elements_last_column: line_size -= 1 lines.append([None] * line_size) for i, line in enumerate(lines): line_size = len(line) for j in range(line_size): k = i + num_lines * j item = strings[k] if j % line_size != line_size - 1: item_len = len(strip_ansi_escapes(item)) item = item + ' ' * (max_len - item_len) line[j] = item sep = ' ' * num_sep_spaces for line in lines: yield sep.join(line)
python
def colformat(strings, num_sep_spaces=1, terminal_width=None): """Format a list of strings like ls does multi-column output.""" if terminal_width is None: terminal_width = get_terminal_size().columns if not strings: return num_items = len(strings) max_len = max([len(strip_ansi_escapes(s)) for s in strings]) num_columns = min( int((terminal_width + num_sep_spaces) / (max_len + num_sep_spaces)), num_items) num_columns = max(1, num_columns) num_lines = int(math.ceil(float(num_items) / num_columns)) num_columns = int(math.ceil(float(num_items) / num_lines)) num_elements_last_column = num_items % num_lines if num_elements_last_column == 0: num_elements_last_column = num_lines lines = [] for i in range(num_lines): line_size = num_columns if i >= num_elements_last_column: line_size -= 1 lines.append([None] * line_size) for i, line in enumerate(lines): line_size = len(line) for j in range(line_size): k = i + num_lines * j item = strings[k] if j % line_size != line_size - 1: item_len = len(strip_ansi_escapes(item)) item = item + ' ' * (max_len - item_len) line[j] = item sep = ' ' * num_sep_spaces for line in lines: yield sep.join(line)
[ "def", "colformat", "(", "strings", ",", "num_sep_spaces", "=", "1", ",", "terminal_width", "=", "None", ")", ":", "if", "terminal_width", "is", "None", ":", "terminal_width", "=", "get_terminal_size", "(", ")", ".", "columns", "if", "not", "strings", ":", ...
Format a list of strings like ls does multi-column output.
[ "Format", "a", "list", "of", "strings", "like", "ls", "does", "multi", "-", "column", "output", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/utils.py#L47-L89
223,031
eliangcs/http-prompt
http_prompt/contextio.py
load_context
def load_context(context, file_path=None):
    """Load a Context object in place from user data directory."""
    if not file_path:
        file_path = _get_context_filepath()
    if os.path.exists(file_path):
        with io.open(file_path, encoding='utf-8') as f:
            for line in f:
                execute(line, context)
python
def load_context(context, file_path=None):
    """Load a Context object in place from user data directory."""
    if not file_path:
        file_path = _get_context_filepath()
    if os.path.exists(file_path):
        with io.open(file_path, encoding='utf-8') as f:
            for line in f:
                execute(line, context)
[ "def", "load_context", "(", "context", ",", "file_path", "=", "None", ")", ":", "if", "not", "file_path", ":", "file_path", "=", "_get_context_filepath", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "with", "io", ".", "...
Load a Context object in place from user data directory.
[ "Load", "a", "Context", "object", "in", "place", "from", "user", "data", "directory", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/contextio.py#L23-L30
223,032
eliangcs/http-prompt
http_prompt/contextio.py
save_context
def save_context(context):
    """Save a Context object to user data directory."""
    file_path = _get_context_filepath()
    content = format_to_http_prompt(context, excluded_options=EXCLUDED_OPTIONS)
    with io.open(file_path, 'w', encoding='utf-8') as f:
        f.write(content)
python
def save_context(context):
    """Save a Context object to user data directory."""
    file_path = _get_context_filepath()
    content = format_to_http_prompt(context, excluded_options=EXCLUDED_OPTIONS)
    with io.open(file_path, 'w', encoding='utf-8') as f:
        f.write(content)
[ "def", "save_context", "(", "context", ")", ":", "file_path", "=", "_get_context_filepath", "(", ")", "content", "=", "format_to_http_prompt", "(", "context", ",", "excluded_options", "=", "EXCLUDED_OPTIONS", ")", "with", "io", ".", "open", "(", "file_path", ","...
Save a Context object to user data directory.
[ "Save", "a", "Context", "object", "to", "user", "data", "directory", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/contextio.py#L33-L38
223,033
eliangcs/http-prompt
http_prompt/context/transform.py
extract_args_for_httpie_main
def extract_args_for_httpie_main(context, method=None): """Transform a Context object to a list of arguments that can be passed to HTTPie main function. """ args = _extract_httpie_options(context) if method: args.append(method.upper()) args.append(context.url) args += _extract_httpie_request_items(context) return args
python
def extract_args_for_httpie_main(context, method=None): """Transform a Context object to a list of arguments that can be passed to HTTPie main function. """ args = _extract_httpie_options(context) if method: args.append(method.upper()) args.append(context.url) args += _extract_httpie_request_items(context) return args
[ "def", "extract_args_for_httpie_main", "(", "context", ",", "method", "=", "None", ")", ":", "args", "=", "_extract_httpie_options", "(", "context", ")", "if", "method", ":", "args", ".", "append", "(", "method", ".", "upper", "(", ")", ")", "args", ".", ...
Transform a Context object to a list of arguments that can be passed to HTTPie main function.
[ "Transform", "a", "Context", "object", "to", "a", "list", "of", "arguments", "that", "can", "be", "passed", "to", "HTTPie", "main", "function", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L73-L84
223,034
eliangcs/http-prompt
http_prompt/context/transform.py
format_to_httpie
def format_to_httpie(context, method=None): """Format a Context object to an HTTPie command.""" cmd = ['http'] + _extract_httpie_options(context, quote=True, join_key_value=True) if method: cmd.append(method.upper()) cmd.append(context.url) cmd += _extract_httpie_request_items(context, quote=True) return ' '.join(cmd) + '\n'
python
def format_to_httpie(context, method=None): """Format a Context object to an HTTPie command.""" cmd = ['http'] + _extract_httpie_options(context, quote=True, join_key_value=True) if method: cmd.append(method.upper()) cmd.append(context.url) cmd += _extract_httpie_request_items(context, quote=True) return ' '.join(cmd) + '\n'
[ "def", "format_to_httpie", "(", "context", ",", "method", "=", "None", ")", ":", "cmd", "=", "[", "'http'", "]", "+", "_extract_httpie_options", "(", "context", ",", "quote", "=", "True", ",", "join_key_value", "=", "True", ")", "if", "method", ":", "cmd...
Format a Context object to an HTTPie command.
[ "Format", "a", "Context", "object", "to", "an", "HTTPie", "command", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L97-L105
223,035
eliangcs/http-prompt
http_prompt/context/transform.py
format_to_http_prompt
def format_to_http_prompt(context, excluded_options=None): """Format a Context object to HTTP Prompt commands.""" cmds = _extract_httpie_options(context, quote=True, join_key_value=True, excluded_keys=excluded_options) cmds.append('cd ' + smart_quote(context.url)) cmds += _extract_httpie_request_items(context, quote=True) return '\n'.join(cmds) + '\n'
python
def format_to_http_prompt(context, excluded_options=None): """Format a Context object to HTTP Prompt commands.""" cmds = _extract_httpie_options(context, quote=True, join_key_value=True, excluded_keys=excluded_options) cmds.append('cd ' + smart_quote(context.url)) cmds += _extract_httpie_request_items(context, quote=True) return '\n'.join(cmds) + '\n'
[ "def", "format_to_http_prompt", "(", "context", ",", "excluded_options", "=", "None", ")", ":", "cmds", "=", "_extract_httpie_options", "(", "context", ",", "quote", "=", "True", ",", "join_key_value", "=", "True", ",", "excluded_keys", "=", "excluded_options", ...
Format a Context object to HTTP Prompt commands.
[ "Format", "a", "Context", "object", "to", "HTTP", "Prompt", "commands", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L108-L114
223,036
eliangcs/http-prompt
http_prompt/config.py
initialize
def initialize(): """Initialize a default config file if it doesn't exist yet. Returns: tuple: A tuple of (copied, dst_path). `copied` is a bool indicating if this function created the default config file. `dst_path` is the path of the user config file. """ dst_path = get_user_config_path() copied = False if not os.path.exists(dst_path): src_path = os.path.join(os.path.dirname(__file__), 'defaultconfig.py') shutil.copyfile(src_path, dst_path) copied = True return copied, dst_path
python
def initialize(): """Initialize a default config file if it doesn't exist yet. Returns: tuple: A tuple of (copied, dst_path). `copied` is a bool indicating if this function created the default config file. `dst_path` is the path of the user config file. """ dst_path = get_user_config_path() copied = False if not os.path.exists(dst_path): src_path = os.path.join(os.path.dirname(__file__), 'defaultconfig.py') shutil.copyfile(src_path, dst_path) copied = True return copied, dst_path
[ "def", "initialize", "(", ")", ":", "dst_path", "=", "get_user_config_path", "(", ")", "copied", "=", "False", "if", "not", "os", ".", "path", ".", "exists", "(", "dst_path", ")", ":", "src_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", ...
Initialize a default config file if it doesn't exist yet. Returns: tuple: A tuple of (copied, dst_path). `copied` is a bool indicating if this function created the default config file. `dst_path` is the path of the user config file.
[ "Initialize", "a", "default", "config", "file", "if", "it", "doesn", "t", "exist", "yet", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/config.py#L17-L31
223,037
eliangcs/http-prompt
http_prompt/config.py
load_user
def load_user(): """Read user config file and return it as a dict.""" config_path = get_user_config_path() config = {} # TODO: This may be overkill and too slow just for reading a config file with open(config_path) as f: code = compile(f.read(), config_path, 'exec') exec(code, config) keys = list(six.iterkeys(config)) for k in keys: if k.startswith('_'): del config[k] return config
python
def load_user(): """Read user config file and return it as a dict.""" config_path = get_user_config_path() config = {} # TODO: This may be overkill and too slow just for reading a config file with open(config_path) as f: code = compile(f.read(), config_path, 'exec') exec(code, config) keys = list(six.iterkeys(config)) for k in keys: if k.startswith('_'): del config[k] return config
[ "def", "load_user", "(", ")", ":", "config_path", "=", "get_user_config_path", "(", ")", "config", "=", "{", "}", "# TODO: This may be overkill and too slow just for reading a config file", "with", "open", "(", "config_path", ")", "as", "f", ":", "code", "=", "compi...
Read user config file and return it as a dict.
[ "Read", "user", "config", "file", "and", "return", "it", "as", "a", "dict", "." ]
189321f25e3526fa1b79a9dc38c317892c478986
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/config.py#L48-L63
223,038
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
filter_single_reads_by_length
def filter_single_reads_by_length(in_file, quality_format, min_length=20, out_file=None): """ removes reads from a fastq file which are shorter than a minimum length """ logger.info("Removing reads in %s thare are less than %d bases." % (in_file, min_length)) in_iterator = SeqIO.parse(in_file, quality_format) out_iterator = (record for record in in_iterator if len(record.seq) > min_length) with file_transaction(out_file) as tmp_out_file: with open(tmp_out_file, "w") as out_handle: SeqIO.write(out_iterator, out_handle, quality_format) return out_file
python
def filter_single_reads_by_length(in_file, quality_format, min_length=20, out_file=None): """ removes reads from a fastq file which are shorter than a minimum length """ logger.info("Removing reads in %s thare are less than %d bases." % (in_file, min_length)) in_iterator = SeqIO.parse(in_file, quality_format) out_iterator = (record for record in in_iterator if len(record.seq) > min_length) with file_transaction(out_file) as tmp_out_file: with open(tmp_out_file, "w") as out_handle: SeqIO.write(out_iterator, out_handle, quality_format) return out_file
[ "def", "filter_single_reads_by_length", "(", "in_file", ",", "quality_format", ",", "min_length", "=", "20", ",", "out_file", "=", "None", ")", ":", "logger", ".", "info", "(", "\"Removing reads in %s thare are less than %d bases.\"", "%", "(", "in_file", ",", "min_...
removes reads from a fastq file which are shorter than a minimum length
[ "removes", "reads", "from", "a", "fastq", "file", "which", "are", "shorter", "than", "a", "minimum", "length" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L40-L55
223,039
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
filter_reads_by_length
def filter_reads_by_length(fq1, fq2, quality_format, min_length=20): """ removes reads from a pair of fastq files that are shorter than a minimum length. removes both ends of a read if one end falls below the threshold while maintaining the order of the reads """ logger.info("Removing reads in %s and %s that " "are less than %d bases." % (fq1, fq2, min_length)) fq1_out = utils.append_stem(fq1, ".fixed") fq2_out = utils.append_stem(fq2, ".fixed") fq1_single = utils.append_stem(fq1, ".singles") fq2_single = utils.append_stem(fq2, ".singles") if all(map(utils.file_exists, [fq1_out, fq2_out, fq2_single, fq2_single])): return [fq1_out, fq2_out] fq1_in = SeqIO.parse(fq1, quality_format) fq2_in = SeqIO.parse(fq2, quality_format) out_files = [fq1_out, fq2_out, fq1_single, fq2_single] with file_transaction(out_files) as tmp_out_files: fq1_out_handle = open(tmp_out_files[0], "w") fq2_out_handle = open(tmp_out_files[1], "w") fq1_single_handle = open(tmp_out_files[2], "w") fq2_single_handle = open(tmp_out_files[3], "w") for fq1_record, fq2_record in zip(fq1_in, fq2_in): if len(fq1_record.seq) >= min_length and len(fq2_record.seq) >= min_length: fq1_out_handle.write(fq1_record.format(quality_format)) fq2_out_handle.write(fq2_record.format(quality_format)) else: if len(fq1_record.seq) > min_length: fq1_single_handle.write(fq1_record.format(quality_format)) if len(fq2_record.seq) > min_length: fq2_single_handle.write(fq2_record.format(quality_format)) fq1_out_handle.close() fq2_out_handle.close() fq1_single_handle.close() fq2_single_handle.close() return [fq1_out, fq2_out]
python
def filter_reads_by_length(fq1, fq2, quality_format, min_length=20): """ removes reads from a pair of fastq files that are shorter than a minimum length. removes both ends of a read if one end falls below the threshold while maintaining the order of the reads """ logger.info("Removing reads in %s and %s that " "are less than %d bases." % (fq1, fq2, min_length)) fq1_out = utils.append_stem(fq1, ".fixed") fq2_out = utils.append_stem(fq2, ".fixed") fq1_single = utils.append_stem(fq1, ".singles") fq2_single = utils.append_stem(fq2, ".singles") if all(map(utils.file_exists, [fq1_out, fq2_out, fq2_single, fq2_single])): return [fq1_out, fq2_out] fq1_in = SeqIO.parse(fq1, quality_format) fq2_in = SeqIO.parse(fq2, quality_format) out_files = [fq1_out, fq2_out, fq1_single, fq2_single] with file_transaction(out_files) as tmp_out_files: fq1_out_handle = open(tmp_out_files[0], "w") fq2_out_handle = open(tmp_out_files[1], "w") fq1_single_handle = open(tmp_out_files[2], "w") fq2_single_handle = open(tmp_out_files[3], "w") for fq1_record, fq2_record in zip(fq1_in, fq2_in): if len(fq1_record.seq) >= min_length and len(fq2_record.seq) >= min_length: fq1_out_handle.write(fq1_record.format(quality_format)) fq2_out_handle.write(fq2_record.format(quality_format)) else: if len(fq1_record.seq) > min_length: fq1_single_handle.write(fq1_record.format(quality_format)) if len(fq2_record.seq) > min_length: fq2_single_handle.write(fq2_record.format(quality_format)) fq1_out_handle.close() fq2_out_handle.close() fq1_single_handle.close() fq2_single_handle.close() return [fq1_out, fq2_out]
[ "def", "filter_reads_by_length", "(", "fq1", ",", "fq2", ",", "quality_format", ",", "min_length", "=", "20", ")", ":", "logger", ".", "info", "(", "\"Removing reads in %s and %s that \"", "\"are less than %d bases.\"", "%", "(", "fq1", ",", "fq2", ",", "min_lengt...
removes reads from a pair of fastq files that are shorter than a minimum length. removes both ends of a read if one end falls below the threshold while maintaining the order of the reads
[ "removes", "reads", "from", "a", "pair", "of", "fastq", "files", "that", "are", "shorter", "than", "a", "minimum", "length", ".", "removes", "both", "ends", "of", "a", "read", "if", "one", "end", "falls", "below", "the", "threshold", "while", "maintaining"...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L57-L99
223,040
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
rstrip_extra
def rstrip_extra(fname): """Strip extraneous, non-discriminative filename info from the end of a file. """ to_strip = ("_R", ".R", "-R", "_", "fastq", ".", "-") while fname.endswith(to_strip): for x in to_strip: if fname.endswith(x): fname = fname[:len(fname) - len(x)] break return fname
python
def rstrip_extra(fname): """Strip extraneous, non-discriminative filename info from the end of a file. """ to_strip = ("_R", ".R", "-R", "_", "fastq", ".", "-") while fname.endswith(to_strip): for x in to_strip: if fname.endswith(x): fname = fname[:len(fname) - len(x)] break return fname
[ "def", "rstrip_extra", "(", "fname", ")", ":", "to_strip", "=", "(", "\"_R\"", ",", "\".R\"", ",", "\"-R\"", ",", "\"_\"", ",", "\"fastq\"", ",", "\".\"", ",", "\"-\"", ")", "while", "fname", ".", "endswith", "(", "to_strip", ")", ":", "for", "x", "i...
Strip extraneous, non-discriminative filename info from the end of a file.
[ "Strip", "extraneous", "non", "-", "discriminative", "filename", "info", "from", "the", "end", "of", "a", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L101-L110
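Two quick traces with made-up filenames; the while loop keeps peeling matching suffix fragments until none of them fit:

def rstrip_extra(fname):
    # Repeatedly strip trailing separator/extension fragments from the filename stem.
    to_strip = ("_R", ".R", "-R", "_", "fastq", ".", "-")
    while fname.endswith(to_strip):
        for x in to_strip:
            if fname.endswith(x):
                fname = fname[:len(fname) - len(x)]
                break
    return fname

print(rstrip_extra("sample1_1.fastq"))  # 'sample1_1' -- 'fastq', then '.', are removed
print(rstrip_extra("sample1_R"))        # 'sample1'   -- the '_R' fragment goes in one step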
223,041
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
fast_combine_pairs
def fast_combine_pairs(files, force_single, full_name, separators): """ assume files that need to be paired are within 10 entries of each other, once the list is sorted """ files = sort_filenames(files) chunks = tz.sliding_window(10, files) pairs = [combine_pairs(chunk, force_single, full_name, separators) for chunk in chunks] pairs = [y for x in pairs for y in x] longest = defaultdict(list) # for each file, save the longest pair it is in for pair in pairs: for file in pair: if len(longest[file]) < len(pair): longest[file] = pair # keep only unique pairs longest = {tuple(sort_filenames(x)) for x in longest.values()} # ensure filenames are R1 followed by R2 return [sort_filenames(list(x)) for x in longest]
python
def fast_combine_pairs(files, force_single, full_name, separators): """ assume files that need to be paired are within 10 entries of each other, once the list is sorted """ files = sort_filenames(files) chunks = tz.sliding_window(10, files) pairs = [combine_pairs(chunk, force_single, full_name, separators) for chunk in chunks] pairs = [y for x in pairs for y in x] longest = defaultdict(list) # for each file, save the longest pair it is in for pair in pairs: for file in pair: if len(longest[file]) < len(pair): longest[file] = pair # keep only unique pairs longest = {tuple(sort_filenames(x)) for x in longest.values()} # ensure filenames are R1 followed by R2 return [sort_filenames(list(x)) for x in longest]
[ "def", "fast_combine_pairs", "(", "files", ",", "force_single", ",", "full_name", ",", "separators", ")", ":", "files", "=", "sort_filenames", "(", "files", ")", "chunks", "=", "tz", ".", "sliding_window", "(", "10", ",", "files", ")", "pairs", "=", "[", ...
assume files that need to be paired are within 10 entries of each other, once the list is sorted
[ "assume", "files", "that", "need", "to", "be", "paired", "are", "within", "10", "entries", "of", "each", "other", "once", "the", "list", "is", "sorted" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L187-L204
223,042
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
open_fastq
def open_fastq(in_file):
    """ open a fastq file, using gzip if it is gzipped """
    if objectstore.is_remote(in_file):
        return objectstore.open_file(in_file)
    else:
        return utils.open_gzipsafe(in_file)
python
def open_fastq(in_file):
    """ open a fastq file, using gzip if it is gzipped """
    if objectstore.is_remote(in_file):
        return objectstore.open_file(in_file)
    else:
        return utils.open_gzipsafe(in_file)
[ "def", "open_fastq", "(", "in_file", ")", ":", "if", "objectstore", ".", "is_remote", "(", "in_file", ")", ":", "return", "objectstore", ".", "open_file", "(", "in_file", ")", "else", ":", "return", "utils", ".", "open_gzipsafe", "(", "in_file", ")" ]
open a fastq file, using gzip if it is gzipped
[ "open", "a", "fastq", "file", "using", "gzip", "if", "it", "is", "gzipped" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L309-L315
223,043
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
get_region_bed
def get_region_bed(region, items, out_file, want_gzip=True): """Retrieve BED file of regions to analyze, either single or multi-region. """ variant_regions = bedutils.population_variant_regions(items, merged=True) target = shared.subset_variant_regions(variant_regions, region, out_file, items) if not target: raise ValueError("Need BED input for strelka2 regions: %s %s" % (region, target)) if not isinstance(target, six.string_types) or not os.path.isfile(target): chrom, start, end = target target = "%s-regions.bed" % utils.splitext_plus(out_file)[0] with file_transaction(items[0], target) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) out_file = target if want_gzip: out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) return out_file
python
def get_region_bed(region, items, out_file, want_gzip=True): """Retrieve BED file of regions to analyze, either single or multi-region. """ variant_regions = bedutils.population_variant_regions(items, merged=True) target = shared.subset_variant_regions(variant_regions, region, out_file, items) if not target: raise ValueError("Need BED input for strelka2 regions: %s %s" % (region, target)) if not isinstance(target, six.string_types) or not os.path.isfile(target): chrom, start, end = target target = "%s-regions.bed" % utils.splitext_plus(out_file)[0] with file_transaction(items[0], target) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) out_file = target if want_gzip: out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) return out_file
[ "def", "get_region_bed", "(", "region", ",", "items", ",", "out_file", ",", "want_gzip", "=", "True", ")", ":", "variant_regions", "=", "bedutils", ".", "population_variant_regions", "(", "items", ",", "merged", "=", "True", ")", "target", "=", "shared", "."...
Retrieve BED file of regions to analyze, either single or multi-region.
[ "Retrieve", "BED", "file", "of", "regions", "to", "analyze", "either", "single", "or", "multi", "-", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L36-L52
223,044
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
coverage_interval_from_bed
def coverage_interval_from_bed(bed_file, per_chrom=True): """Calculate a coverage interval for the current region BED. This helps correctly work with cases of uneven coverage across an analysis genome. strelka2 and other model based callers have flags for targeted and non which depend on the local context. Checks coverage per chromosome, avoiding non-standard chromosomes, if per_chrom is set. Otherwise does a global check over all regions. The global check performs better for strelka2 but not for DeepVariant: https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn """ total_starts = {} total_ends = {} bed_bases = collections.defaultdict(int) with utils.open_gzipsafe(bed_file) as in_handle: for line in in_handle: parts = line.split() if len(parts) >= 3: chrom, start, end = parts[:3] if chromhacks.is_autosomal(chrom): start = int(start) end = int(end) bed_bases[chrom] += (end - start) total_starts[chrom] = min([start, total_starts.get(chrom, sys.maxsize)]) total_ends[chrom] = max([end, total_ends.get(chrom, 0)]) # can check per chromosome -- any one chromosome with larger, or over all regions if per_chrom: freqs = [float(bed_bases[c]) / float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())] elif len(bed_bases) > 0: freqs = [sum([bed_bases[c] for c in sorted(bed_bases.keys())]) / sum([float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())])] else: freqs = [] # Should be importing GENOME_COV_THRESH but get circular imports if any([f >= 0.40 for f in freqs]): return "genome" else: return "targeted"
python
def coverage_interval_from_bed(bed_file, per_chrom=True): """Calculate a coverage interval for the current region BED. This helps correctly work with cases of uneven coverage across an analysis genome. strelka2 and other model based callers have flags for targeted and non which depend on the local context. Checks coverage per chromosome, avoiding non-standard chromosomes, if per_chrom is set. Otherwise does a global check over all regions. The global check performs better for strelka2 but not for DeepVariant: https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn """ total_starts = {} total_ends = {} bed_bases = collections.defaultdict(int) with utils.open_gzipsafe(bed_file) as in_handle: for line in in_handle: parts = line.split() if len(parts) >= 3: chrom, start, end = parts[:3] if chromhacks.is_autosomal(chrom): start = int(start) end = int(end) bed_bases[chrom] += (end - start) total_starts[chrom] = min([start, total_starts.get(chrom, sys.maxsize)]) total_ends[chrom] = max([end, total_ends.get(chrom, 0)]) # can check per chromosome -- any one chromosome with larger, or over all regions if per_chrom: freqs = [float(bed_bases[c]) / float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())] elif len(bed_bases) > 0: freqs = [sum([bed_bases[c] for c in sorted(bed_bases.keys())]) / sum([float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())])] else: freqs = [] # Should be importing GENOME_COV_THRESH but get circular imports if any([f >= 0.40 for f in freqs]): return "genome" else: return "targeted"
[ "def", "coverage_interval_from_bed", "(", "bed_file", ",", "per_chrom", "=", "True", ")", ":", "total_starts", "=", "{", "}", "total_ends", "=", "{", "}", "bed_bases", "=", "collections", ".", "defaultdict", "(", "int", ")", "with", "utils", ".", "open_gzips...
Calculate a coverage interval for the current region BED. This helps correctly work with cases of uneven coverage across an analysis genome. strelka2 and other model based callers have flags for targeted and non which depend on the local context. Checks coverage per chromosome, avoiding non-standard chromosomes, if per_chrom is set. Otherwise does a global check over all regions. The global check performs better for strelka2 but not for DeepVariant: https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn
[ "Calculate", "a", "coverage", "interval", "for", "the", "current", "region", "BED", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L54-L93
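To make the 0.40 cutoff concrete, a tiny arithmetic sketch with invented numbers (not taken from any real BED file):

# Hypothetical per-chromosome tallies mirroring the bookkeeping in the function above.
bed_bases = 45000000                   # bases covered by BED regions on one chromosome
span = 100000000 - 0                   # last region end minus first region start
freq = float(bed_bases) / float(span)  # 0.45
print("genome" if freq >= 0.40 else "targeted")  # 'genome': at least 40% of the span is covered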
223,045
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_is_targeted_region
def _is_targeted_region(cur_bed, data): """Calculate if we should process region as a targeted or WGS. Currently always based on total coverage interval, as that validates best and is consistent between CWL (larger blocks) and non-CWL runs (smaller blocks). We can check core usage and provide a consistent report when moving to CWL exclusively. """ cores = dd.get_num_cores(data) if cores > 0: # Apply to all core setups now for consistency return dd.get_coverage_interval(data) not in ["genome"] else: return coverage_interval_from_bed(cur_bed, per_chrom=False) == "targeted"
python
def _is_targeted_region(cur_bed, data): """Calculate if we should process region as a targeted or WGS. Currently always based on total coverage interval, as that validates best and is consistent between CWL (larger blocks) and non-CWL runs (smaller blocks). We can check core usage and provide a consistent report when moving to CWL exclusively. """ cores = dd.get_num_cores(data) if cores > 0: # Apply to all core setups now for consistency return dd.get_coverage_interval(data) not in ["genome"] else: return coverage_interval_from_bed(cur_bed, per_chrom=False) == "targeted"
[ "def", "_is_targeted_region", "(", "cur_bed", ",", "data", ")", ":", "cores", "=", "dd", ".", "get_num_cores", "(", "data", ")", "if", "cores", ">", "0", ":", "# Apply to all core setups now for consistency", "return", "dd", ".", "get_coverage_interval", "(", "d...
Calculate if we should process region as a targeted or WGS. Currently always based on total coverage interval, as that validates best and is consistent between CWL (larger blocks) and non-CWL runs (smaller blocks). We can check core usage and provide a consistent report when moving to CWL exclusively.
[ "Calculate", "if", "we", "should", "process", "region", "as", "a", "targeted", "or", "WGS", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L95-L107
223,046
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_postprocess_somatic
def _postprocess_somatic(in_file, paired): """Post-process somatic calls to provide standard output. - Converts SGT and NT into standard VCF GT fields - Replace generic TUMOR NORMAL names in VCF with sample names. """ out_file = in_file.replace(".vcf.gz", "-fixed.vcf") if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"): with file_transaction(paired.tumor_data, out_file) as tx_out_file: with utils.open_gzipsafe(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: added_gt = False normal_index, tumor_index = (None, None) for line in in_handle: if line.startswith("##FORMAT") and not added_gt: added_gt = True out_handle.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n') out_handle.write(line) elif line.startswith("#CHROM"): assert added_gt parts = line.strip().split("\t") normal_index = parts.index("NORMAL") tumor_index = parts.index("TUMOR") line = line.replace("NORMAL", paired.normal_name).replace("TUMOR", paired.tumor_name) out_handle.write(line) elif line.startswith("#"): out_handle.write(line) else: parts = line.rstrip().split("\t") tumor_gt, normal_gt = _tumor_normal_genotypes(parts[3], parts[4].split(","), parts[7].split(";"), in_file, parts[:2]) parts[8] = "GT:%s" % parts[8] parts[normal_index] = "%s:%s" % (normal_gt, parts[normal_index]) parts[tumor_index] = "%s:%s" % (tumor_gt, parts[tumor_index]) out_handle.write("\t".join(parts) + "\n") return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
python
def _postprocess_somatic(in_file, paired): """Post-process somatic calls to provide standard output. - Converts SGT and NT into standard VCF GT fields - Replace generic TUMOR NORMAL names in VCF with sample names. """ out_file = in_file.replace(".vcf.gz", "-fixed.vcf") if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"): with file_transaction(paired.tumor_data, out_file) as tx_out_file: with utils.open_gzipsafe(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: added_gt = False normal_index, tumor_index = (None, None) for line in in_handle: if line.startswith("##FORMAT") and not added_gt: added_gt = True out_handle.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n') out_handle.write(line) elif line.startswith("#CHROM"): assert added_gt parts = line.strip().split("\t") normal_index = parts.index("NORMAL") tumor_index = parts.index("TUMOR") line = line.replace("NORMAL", paired.normal_name).replace("TUMOR", paired.tumor_name) out_handle.write(line) elif line.startswith("#"): out_handle.write(line) else: parts = line.rstrip().split("\t") tumor_gt, normal_gt = _tumor_normal_genotypes(parts[3], parts[4].split(","), parts[7].split(";"), in_file, parts[:2]) parts[8] = "GT:%s" % parts[8] parts[normal_index] = "%s:%s" % (normal_gt, parts[normal_index]) parts[tumor_index] = "%s:%s" % (tumor_gt, parts[tumor_index]) out_handle.write("\t".join(parts) + "\n") return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
[ "def", "_postprocess_somatic", "(", "in_file", ",", "paired", ")", ":", "out_file", "=", "in_file", ".", "replace", "(", "\".vcf.gz\"", ",", "\"-fixed.vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", "and", "not", "utils", ".", ...
Post-process somatic calls to provide standard output. - Converts SGT and NT into standard VCF GT fields - Replace generic TUMOR NORMAL names in VCF with sample names.
[ "Post", "-", "process", "somatic", "calls", "to", "provide", "standard", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L272-L307
223,047
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_run_workflow
def _run_workflow(data, workflow_file, work_dir): """Run Strelka2 analysis inside prepared workflow directory. """ utils.remove_safe(os.path.join(work_dir, "workspace")) cmd = [utils.get_program_python("configureStrelkaGermlineWorkflow.py"), workflow_file, "-m", "local", "-j", dd.get_num_cores(data), "--quiet"] do.run(cmd, "Run Strelka2: %s" % dd.get_sample_name(data)) utils.remove_safe(os.path.join(work_dir, "workspace"))
python
def _run_workflow(data, workflow_file, work_dir): """Run Strelka2 analysis inside prepared workflow directory. """ utils.remove_safe(os.path.join(work_dir, "workspace")) cmd = [utils.get_program_python("configureStrelkaGermlineWorkflow.py"), workflow_file, "-m", "local", "-j", dd.get_num_cores(data), "--quiet"] do.run(cmd, "Run Strelka2: %s" % dd.get_sample_name(data)) utils.remove_safe(os.path.join(work_dir, "workspace"))
[ "def", "_run_workflow", "(", "data", ",", "workflow_file", ",", "work_dir", ")", ":", "utils", ".", "remove_safe", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"workspace\"", ")", ")", "cmd", "=", "[", "utils", ".", "get_program_python", ...
Run Strelka2 analysis inside prepared workflow directory.
[ "Run", "Strelka2", "analysis", "inside", "prepared", "workflow", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L320-L327
223,048
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
run_gvcfgenotyper
def run_gvcfgenotyper(data, orig_region, vrn_files, out_file): """Merge strelka2 and Illumina compatible gVCFs with gvcfgenotyper. https://github.com/Illumina/gvcfgenotyper Also need to explore GLnexus (https://github.com/dnanexus-rnd/GLnexus) """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: regions = _find_gvcf_blocks(vrn_files[0], bamprep.region_to_gatk(orig_region), os.path.dirname(tx_out_file)) if len(regions) == 1: _run_gvcfgenotyper(data, regions[0], vrn_files, tx_out_file) else: split_outs = [_run_gvcfgenotyper(data, r, vrn_files, "%s-%s.vcf.gz" % (utils.splitext_plus(out_file)[0], r.replace(":", "_").replace("-", "_"))) for r in regions] vcfutils.concat_variant_files(split_outs, tx_out_file, regions, dd.get_ref_file(data), data["config"]) return vcfutils.bgzip_and_index(out_file, data["config"])
python
def run_gvcfgenotyper(data, orig_region, vrn_files, out_file): """Merge strelka2 and Illumina compatible gVCFs with gvcfgenotyper. https://github.com/Illumina/gvcfgenotyper Also need to explore GLnexus (https://github.com/dnanexus-rnd/GLnexus) """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: regions = _find_gvcf_blocks(vrn_files[0], bamprep.region_to_gatk(orig_region), os.path.dirname(tx_out_file)) if len(regions) == 1: _run_gvcfgenotyper(data, regions[0], vrn_files, tx_out_file) else: split_outs = [_run_gvcfgenotyper(data, r, vrn_files, "%s-%s.vcf.gz" % (utils.splitext_plus(out_file)[0], r.replace(":", "_").replace("-", "_"))) for r in regions] vcfutils.concat_variant_files(split_outs, tx_out_file, regions, dd.get_ref_file(data), data["config"]) return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "run_gvcfgenotyper", "(", "data", ",", "orig_region", ",", "vrn_files", ",", "out_file", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file",...
Merge strelka2 and Illumina compatible gVCFs with gvcfgenotyper. https://github.com/Illumina/gvcfgenotyper Also need to explore GLnexus (https://github.com/dnanexus-rnd/GLnexus)
[ "Merge", "strelka2", "and", "Illumina", "compatible", "gVCFs", "with", "gvcfgenotyper", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L331-L351
223,049
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_run_gvcfgenotyper
def _run_gvcfgenotyper(data, region, vrn_files, out_file): """Run gvcfgenotyper on a single gVCF region in input file. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: input_file = "%s-inputs.txt" % utils.splitext_plus(tx_out_file)[0] with open(input_file, "w") as out_handle: out_handle.write("%s\n" % "\n".join(vrn_files)) cmd = ["gvcfgenotyper", "-f", dd.get_ref_file(data), "-l", input_file, "-r", region, "-O", "z", "-o", tx_out_file] do.run(cmd, "gvcfgenotyper: %s %s" % (dd.get_sample_name(data), region)) return out_file
python
def _run_gvcfgenotyper(data, region, vrn_files, out_file): """Run gvcfgenotyper on a single gVCF region in input file. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: input_file = "%s-inputs.txt" % utils.splitext_plus(tx_out_file)[0] with open(input_file, "w") as out_handle: out_handle.write("%s\n" % "\n".join(vrn_files)) cmd = ["gvcfgenotyper", "-f", dd.get_ref_file(data), "-l", input_file, "-r", region, "-O", "z", "-o", tx_out_file] do.run(cmd, "gvcfgenotyper: %s %s" % (dd.get_sample_name(data), region)) return out_file
[ "def", "_run_gvcfgenotyper", "(", "data", ",", "region", ",", "vrn_files", ",", "out_file", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", "...
Run gvcfgenotyper on a single gVCF region in input file.
[ "Run", "gvcfgenotyper", "on", "a", "single", "gVCF", "region", "in", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L353-L364
223,050
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_find_gvcf_blocks
def _find_gvcf_blocks(vcf_file, region, tmp_dir): """Retrieve gVCF blocks within our current evaluation region. gvcfgenotyper does not support calling larger regions with individual coverage blocks, so we split our big region into potentially multiple. """ region_file = os.path.join(tmp_dir, "cur_region.bed") with open(region_file, "w") as out_handle: chrom, coords = region.split(":") start, end = coords.split("-") out_handle.write("\t".join([chrom, start, end]) + "\n") final_file = os.path.join(tmp_dir, "split_regions.bed") cmd = "gvcf_regions.py {vcf_file} | bedtools intersect -a - -b {region_file} > {final_file}" do.run(cmd.format(**locals())) regions = [] with open(final_file) as in_handle: for line in in_handle: chrom, start, end = line.strip().split("\t") regions.append("%s:%s-%s" % (chrom, start, end)) return regions
python
def _find_gvcf_blocks(vcf_file, region, tmp_dir): """Retrieve gVCF blocks within our current evaluation region. gvcfgenotyper does not support calling larger regions with individual coverage blocks, so we split our big region into potentially multiple. """ region_file = os.path.join(tmp_dir, "cur_region.bed") with open(region_file, "w") as out_handle: chrom, coords = region.split(":") start, end = coords.split("-") out_handle.write("\t".join([chrom, start, end]) + "\n") final_file = os.path.join(tmp_dir, "split_regions.bed") cmd = "gvcf_regions.py {vcf_file} | bedtools intersect -a - -b {region_file} > {final_file}" do.run(cmd.format(**locals())) regions = [] with open(final_file) as in_handle: for line in in_handle: chrom, start, end = line.strip().split("\t") regions.append("%s:%s-%s" % (chrom, start, end)) return regions
[ "def", "_find_gvcf_blocks", "(", "vcf_file", ",", "region", ",", "tmp_dir", ")", ":", "region_file", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"cur_region.bed\"", ")", "with", "open", "(", "region_file", ",", "\"w\"", ")", "as", "out_hand...
Retrieve gVCF blocks within our current evaluation region. gvcfgenotyper does not support calling larger regions with individual coverage blocks, so we split our big region into potentially multiple.
[ "Retrieve", "gVCF", "blocks", "within", "our", "current", "evaluation", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L366-L385
223,051
bcbio/bcbio-nextgen
bcbio/hla/__init__.py
run
def run(samples, run_parallel): """Run HLA detection on the input samples. """ to_process = [] extras = [] for data in (xs[0] for xs in samples): hlacaller = tz.get_in(["config", "algorithm", "hlacaller"], data) if hlacaller: to_process.append(data) else: extras.append([data]) processed = run_parallel("call_hla", ([x] for x in to_process)) return extras + processed
python
def run(samples, run_parallel): """Run HLA detection on the input samples. """ to_process = [] extras = [] for data in (xs[0] for xs in samples): hlacaller = tz.get_in(["config", "algorithm", "hlacaller"], data) if hlacaller: to_process.append(data) else: extras.append([data]) processed = run_parallel("call_hla", ([x] for x in to_process)) return extras + processed
[ "def", "run", "(", "samples", ",", "run_parallel", ")", ":", "to_process", "=", "[", "]", "extras", "=", "[", "]", "for", "data", "in", "(", "xs", "[", "0", "]", "for", "xs", "in", "samples", ")", ":", "hlacaller", "=", "tz", ".", "get_in", "(", ...
Run HLA detection on the input samples.
[ "Run", "HLA", "detection", "on", "the", "input", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/__init__.py#L20-L32
223,052
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
align_bam
def align_bam(in_bam, ref_file, names, align_dir, data): """Perform direct alignment of an input BAM file with BWA using pipes. This avoids disk IO by piping between processes: - samtools sort of input BAM to queryname - bedtools conversion to interleaved FASTQ - bwa-mem alignment - samtools conversion to BAM - samtools sort to coordinate """ config = data["config"] out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"])) samtools = config_utils.get_program("samtools", config) bedtools = config_utils.get_program("bedtools", config) resources = config_utils.get_resources("samtools", config) num_cores = config["algorithm"].get("num_cores", 1) # adjust memory for samtools since used for input and output max_mem = config_utils.adjust_memory(resources.get("memory", "1G"), 3, "decrease").upper() if not utils.file_exists(out_file): with tx_tmpdir(data) as work_dir: with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file): bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-") tx_out_prefix = os.path.splitext(tx_out_file)[0] prefix1 = "%s-in1" % tx_out_prefix cmd = ("unset JAVA_HOME && " "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} " "| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout " "| {bwa_cmd} | ") cmd = cmd.format(**locals()) + tobam_cl do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)]) return out_file
python
def align_bam(in_bam, ref_file, names, align_dir, data): """Perform direct alignment of an input BAM file with BWA using pipes. This avoids disk IO by piping between processes: - samtools sort of input BAM to queryname - bedtools conversion to interleaved FASTQ - bwa-mem alignment - samtools conversion to BAM - samtools sort to coordinate """ config = data["config"] out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"])) samtools = config_utils.get_program("samtools", config) bedtools = config_utils.get_program("bedtools", config) resources = config_utils.get_resources("samtools", config) num_cores = config["algorithm"].get("num_cores", 1) # adjust memory for samtools since used for input and output max_mem = config_utils.adjust_memory(resources.get("memory", "1G"), 3, "decrease").upper() if not utils.file_exists(out_file): with tx_tmpdir(data) as work_dir: with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file): bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-") tx_out_prefix = os.path.splitext(tx_out_file)[0] prefix1 = "%s-in1" % tx_out_prefix cmd = ("unset JAVA_HOME && " "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} " "| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout " "| {bwa_cmd} | ") cmd = cmd.format(**locals()) + tobam_cl do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)]) return out_file
[ "def", "align_bam", "(", "in_bam", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "align_dir", ",", "\"{0}-sort.bam\"", ".", "...
Perform direct alignment of an input BAM file with BWA using pipes. This avoids disk IO by piping between processes: - samtools sort of input BAM to queryname - bedtools conversion to interleaved FASTQ - bwa-mem alignment - samtools conversion to BAM - samtools sort to coordinate
[ "Perform", "direct", "alignment", "of", "an", "input", "BAM", "file", "with", "BWA", "using", "pipes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L21-L53
223,053
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
_get_bwa_mem_cmd
def _get_bwa_mem_cmd(data, out_file, ref_file, fastq1, fastq2=""): """Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing. Commands for HLA post-processing: base=TEST run-HLA $base.hla > $base.hla.top cat $base.hla.HLA*.gt | grep ^GT | cut -f2- > $base.hla.all rm -f $base.hla.HLA*gt rm -f $base.hla.HLA*gz """ alt_file = ref_file + ".alt" if utils.file_exists(alt_file) and dd.get_hlacaller(data): bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem"))) hla_base = os.path.join(utils.safe_makedir(os.path.join(os.path.dirname(out_file), "hla")), os.path.basename(out_file) + ".hla") alt_cmd = (" | {bwakit_dir}/k8 {bwakit_dir}/bwa-postalt.js -p {hla_base} {alt_file}") else: alt_cmd = "" if dd.get_aligner(data) == "sentieon-bwa": bwa_exe = "sentieon-bwa" exports = sentieon.license_export(data) else: bwa_exe = "bwa" exports = "" bwa = config_utils.get_program(bwa_exe, data["config"]) num_cores = data["config"]["algorithm"].get("num_cores", 1) bwa_resources = config_utils.get_resources("bwa", data["config"]) bwa_params = (" ".join([str(x) for x in bwa_resources.get("options", [])]) if "options" in bwa_resources else "") rg_info = novoalign.get_rg_info(data["rgnames"]) # For UMI runs, pass along consensus tags c_tags = "-C" if "umi_bam" in data else "" pairing = "-p" if not fastq2 else "" # Restrict seed occurances to 1/2 of default, manage memory usage for centromere repeats in hg38 # https://sourceforge.net/p/bio-bwa/mailman/message/31514937/ # http://ehc.ac/p/bio-bwa/mailman/message/32268544/ mem_usage = "-c 250" bwa_cmd = ("{exports}{bwa} mem {pairing} {c_tags} {mem_usage} -M -t {num_cores} {bwa_params} -R '{rg_info}' " "-v 1 {ref_file} {fastq1} {fastq2} ") return (bwa_cmd + alt_cmd).format(**locals())
python
def _get_bwa_mem_cmd(data, out_file, ref_file, fastq1, fastq2=""): """Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing. Commands for HLA post-processing: base=TEST run-HLA $base.hla > $base.hla.top cat $base.hla.HLA*.gt | grep ^GT | cut -f2- > $base.hla.all rm -f $base.hla.HLA*gt rm -f $base.hla.HLA*gz """ alt_file = ref_file + ".alt" if utils.file_exists(alt_file) and dd.get_hlacaller(data): bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem"))) hla_base = os.path.join(utils.safe_makedir(os.path.join(os.path.dirname(out_file), "hla")), os.path.basename(out_file) + ".hla") alt_cmd = (" | {bwakit_dir}/k8 {bwakit_dir}/bwa-postalt.js -p {hla_base} {alt_file}") else: alt_cmd = "" if dd.get_aligner(data) == "sentieon-bwa": bwa_exe = "sentieon-bwa" exports = sentieon.license_export(data) else: bwa_exe = "bwa" exports = "" bwa = config_utils.get_program(bwa_exe, data["config"]) num_cores = data["config"]["algorithm"].get("num_cores", 1) bwa_resources = config_utils.get_resources("bwa", data["config"]) bwa_params = (" ".join([str(x) for x in bwa_resources.get("options", [])]) if "options" in bwa_resources else "") rg_info = novoalign.get_rg_info(data["rgnames"]) # For UMI runs, pass along consensus tags c_tags = "-C" if "umi_bam" in data else "" pairing = "-p" if not fastq2 else "" # Restrict seed occurances to 1/2 of default, manage memory usage for centromere repeats in hg38 # https://sourceforge.net/p/bio-bwa/mailman/message/31514937/ # http://ehc.ac/p/bio-bwa/mailman/message/32268544/ mem_usage = "-c 250" bwa_cmd = ("{exports}{bwa} mem {pairing} {c_tags} {mem_usage} -M -t {num_cores} {bwa_params} -R '{rg_info}' " "-v 1 {ref_file} {fastq1} {fastq2} ") return (bwa_cmd + alt_cmd).format(**locals())
[ "def", "_get_bwa_mem_cmd", "(", "data", ",", "out_file", ",", "ref_file", ",", "fastq1", ",", "fastq2", "=", "\"\"", ")", ":", "alt_file", "=", "ref_file", "+", "\".alt\"", "if", "utils", ".", "file_exists", "(", "alt_file", ")", "and", "dd", ".", "get_h...
Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing.

Commands for HLA post-processing:
    base=TEST
    run-HLA $base.hla > $base.hla.top
    cat $base.hla.HLA*.gt | grep ^GT | cut -f2- > $base.hla.all
    rm -f $base.hla.HLA*gt
    rm -f $base.hla.HLA*gz
[ "Perform", "piped", "bwa", "mem", "mapping", "potentially", "with", "alternative", "alleles", "in", "GRCh38", "+", "HLA", "typing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L55-L94
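A note on the command construction above: _get_bwa_mem_cmd fills a shell template with str.format(**locals()). The following is a minimal, self-contained sketch of that templating pattern only; the tool options, read group string, and file names are illustrative placeholders rather than bcbio's actual configuration.

# Sketch of the format(**locals()) templating pattern; all values are placeholders.
def build_aligner_cmd(ref_file, fastq1, fastq2="", num_cores=4):
    rg_info = r"@RG\tID:sample1\tSM:sample1\tPL:illumina"  # assumed read group string
    pairing = "-p" if not fastq2 else ""                   # interleaved input when no second fastq
    mem_usage = "-c 250"                                   # cap seed occurrences, as in the command above
    cmd = ("bwa mem {pairing} {mem_usage} -M -t {num_cores} -R '{rg_info}' "
           "-v 1 {ref_file} {fastq1} {fastq2}")
    return cmd.format(**locals())

print(build_aligner_cmd("hg38.fa", "sample_R1.fq.gz", "sample_R2.fq.gz"))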
223,054
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
_align_mem
def _align_mem(fastq_file, pair_file, ref_file, out_file, names, rg_info, data): """Perform bwa-mem alignment on supported read lengths. """ with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file): cmd = ("unset JAVA_HOME && " "%s | %s" % (_get_bwa_mem_cmd(data, out_file, ref_file, fastq_file, pair_file), tobam_cl)) do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)]) return out_file
python
def _align_mem(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform bwa-mem alignment on supported read lengths.
    """
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        cmd = ("unset JAVA_HOME && "
               "%s | %s" % (_get_bwa_mem_cmd(data, out_file, ref_file, fastq_file, pair_file), tobam_cl))
        do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
               [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
    return out_file
[ "def", "_align_mem", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "out_file", ",", "names", ",", "rg_info", ",", "data", ")", ":", "with", "postalign", ".", "tobam_cl", "(", "data", ",", "out_file", ",", "pair_file", "!=", "\"\"", ")", "as"...
Perform bwa-mem alignment on supported read lengths.
[ "Perform", "bwa", "-", "mem", "alignment", "on", "supported", "read", "lengths", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L174-L182
223,055
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
_align_backtrack
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data): """Perform a BWA alignment using 'aln' backtrack algorithm. """ bwa = config_utils.get_program("bwa", data["config"]) config = data["config"] sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0] sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else "" if not utils.file_exists(sai1_file): with file_transaction(data, sai1_file) as tx_sai1_file: _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config) if sai2_file and not utils.file_exists(sai2_file): with file_transaction(data, sai2_file) as tx_sai2_file: _run_bwa_align(pair_file, ref_file, tx_sai2_file, config) with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file): align_type = "sampe" if sai2_file else "samse" cmd = ("unset JAVA_HOME && {bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} " "{fastq_file} {pair_file} | ") cmd = cmd.format(**locals()) + tobam_cl do.run(cmd, "bwa %s" % align_type, data) return out_file
python
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data): """Perform a BWA alignment using 'aln' backtrack algorithm. """ bwa = config_utils.get_program("bwa", data["config"]) config = data["config"] sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0] sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else "" if not utils.file_exists(sai1_file): with file_transaction(data, sai1_file) as tx_sai1_file: _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config) if sai2_file and not utils.file_exists(sai2_file): with file_transaction(data, sai2_file) as tx_sai2_file: _run_bwa_align(pair_file, ref_file, tx_sai2_file, config) with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file): align_type = "sampe" if sai2_file else "samse" cmd = ("unset JAVA_HOME && {bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} " "{fastq_file} {pair_file} | ") cmd = cmd.format(**locals()) + tobam_cl do.run(cmd, "bwa %s" % align_type, data) return out_file
[ "def", "_align_backtrack", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "out_file", ",", "names", ",", "rg_info", ",", "data", ")", ":", "bwa", "=", "config_utils", ".", "get_program", "(", "\"bwa\"", ",", "data", "[", "\"config\"", "]", ")",...
Perform a BWA alignment using 'aln' backtrack algorithm.
[ "Perform", "a", "BWA", "alignment", "using", "aln", "backtrack", "algorithm", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L184-L203
223,056
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
run_main
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None, parallel=None, workflow=None): """Run variant analysis, handling command line options. """ # Set environment to standard to use periods for decimals and avoid localization os.environ["LC_ALL"] = "C" os.environ["LC"] = "C" os.environ["LANG"] = "C" workdir = utils.safe_makedir(os.path.abspath(workdir)) os.chdir(workdir) config, config_file = config_utils.load_system_config(config_file, workdir) if config.get("log_dir", None) is None: config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR) if parallel["type"] in ["local", "clusterk"]: _setup_resources() _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) elif parallel["type"] == "ipython": assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)" if parallel["scheduler"] != "sge": assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)" elif not parallel["queue"]: parallel["queue"] = "" _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) else: raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
python
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None, parallel=None, workflow=None): """Run variant analysis, handling command line options. """ # Set environment to standard to use periods for decimals and avoid localization os.environ["LC_ALL"] = "C" os.environ["LC"] = "C" os.environ["LANG"] = "C" workdir = utils.safe_makedir(os.path.abspath(workdir)) os.chdir(workdir) config, config_file = config_utils.load_system_config(config_file, workdir) if config.get("log_dir", None) is None: config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR) if parallel["type"] in ["local", "clusterk"]: _setup_resources() _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) elif parallel["type"] == "ipython": assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)" if parallel["scheduler"] != "sge": assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)" elif not parallel["queue"]: parallel["queue"] = "" _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) else: raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
[ "def", "run_main", "(", "workdir", ",", "config_file", "=", "None", ",", "fc_dir", "=", "None", ",", "run_info_yaml", "=", "None", ",", "parallel", "=", "None", ",", "workflow", "=", "None", ")", ":", "# Set environment to standard to use periods for decimals and ...
Run variant analysis, handling command line options.
[ "Run", "variant", "analysis", "handling", "command", "line", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L29-L55
223,057
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
_setup_resources
def _setup_resources(): """Attempt to increase resource limits up to hard limits. This allows us to avoid out of file handle limits where we can move beyond the soft limit up to the hard limit. """ target_procs = 10240 cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC) target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc)) cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE) target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
python
def _setup_resources():
    """Attempt to increase resource limits up to hard limits.

    This allows us to avoid out of file handle limits where we can move
    beyond the soft limit up to the hard limit.
    """
    target_procs = 10240
    cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
    target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
    cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
    target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
[ "def", "_setup_resources", "(", ")", ":", "target_procs", "=", "10240", "cur_proc", ",", "max_proc", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NPROC", ")", "target_proc", "=", "min", "(", "max_proc", ",", "target_procs", ")", "if", "ma...
Attempt to increase resource limits up to hard limits. This allows us to avoid out of file handle limits where we can move beyond the soft limit up to the hard limit.
[ "Attempt", "to", "increase", "resource", "limits", "up", "to", "hard", "limits", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L57-L69
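The rlimit handling in _setup_resources uses only the standard library resource module. As a small standalone sketch of the same pattern, the snippet below raises the open-file soft limit toward the hard limit; the 10240 target mirrors the value above and is otherwise arbitrary.

import resource

def raise_nofile_limit(target=10240):
    # Move the soft limit for open file handles toward the hard limit.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    wanted = min(hard, target) if hard > 0 else target
    resource.setrlimit(resource.RLIMIT_NOFILE, (max(soft, wanted), hard))
    return resource.getrlimit(resource.RLIMIT_NOFILE)

print(raise_nofile_limit())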
223,058
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
_wres
def _wres(parallel, progs, fresources=None, ensure_mem=None): """Add resource information to the parallel environment on required programs and files. Enables spinning up required machines and operating in non-shared filesystem environments. progs -- Third party tools used in processing fresources -- Required file-based resources needed. These will be transferred on non-shared filesystems. ensure_mem -- Dictionary of required minimum memory for programs used. Ensures enough memory gets allocated on low-core machines. """ parallel = copy.deepcopy(parallel) parallel["progs"] = progs if fresources: parallel["fresources"] = fresources if ensure_mem: parallel["ensure_mem"] = ensure_mem return parallel
python
def _wres(parallel, progs, fresources=None, ensure_mem=None): """Add resource information to the parallel environment on required programs and files. Enables spinning up required machines and operating in non-shared filesystem environments. progs -- Third party tools used in processing fresources -- Required file-based resources needed. These will be transferred on non-shared filesystems. ensure_mem -- Dictionary of required minimum memory for programs used. Ensures enough memory gets allocated on low-core machines. """ parallel = copy.deepcopy(parallel) parallel["progs"] = progs if fresources: parallel["fresources"] = fresources if ensure_mem: parallel["ensure_mem"] = ensure_mem return parallel
[ "def", "_wres", "(", "parallel", ",", "progs", ",", "fresources", "=", "None", ",", "ensure_mem", "=", "None", ")", ":", "parallel", "=", "copy", ".", "deepcopy", "(", "parallel", ")", "parallel", "[", "\"progs\"", "]", "=", "progs", "if", "fresources", ...
Add resource information to the parallel environment on required programs and files. Enables spinning up required machines and operating in non-shared filesystem environments.

progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures enough memory gets allocated on low-core machines.
[ "Add", "resource", "information", "to", "the", "parallel", "environment", "on", "required", "programs", "and", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L94-L112
223,059
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
rnaseq_prep_samples
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples): """ organizes RNA-seq and small-RNAseq samples, converting from BAM if necessary and trimming if necessary """ pipeline = dd.get_in_samples(samples, dd.get_analysis) trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d) for d in dd.sample_data_iterator(samples)]) resources = ["picard"] needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set) if needs_trimming: resources.append("atropos") with prun.start(_wres(parallel, resources), samples, config, dirs, "trimming", max_multicore=1 if not needs_trimming else None) as run_parallel: with profile.report("organize samples", dirs): samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml, [x[0]["description"] for x in samples]]]) samples = run_parallel("prepare_sample", samples) if needs_trimming: with profile.report("adapter trimming", dirs): if _is_smallrnaseq(pipeline): samples = run_parallel("trim_srna_sample", samples) else: samples = run_parallel("trim_sample", samples) return samples
python
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples): """ organizes RNA-seq and small-RNAseq samples, converting from BAM if necessary and trimming if necessary """ pipeline = dd.get_in_samples(samples, dd.get_analysis) trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d) for d in dd.sample_data_iterator(samples)]) resources = ["picard"] needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set) if needs_trimming: resources.append("atropos") with prun.start(_wres(parallel, resources), samples, config, dirs, "trimming", max_multicore=1 if not needs_trimming else None) as run_parallel: with profile.report("organize samples", dirs): samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml, [x[0]["description"] for x in samples]]]) samples = run_parallel("prepare_sample", samples) if needs_trimming: with profile.report("adapter trimming", dirs): if _is_smallrnaseq(pipeline): samples = run_parallel("trim_srna_sample", samples) else: samples = run_parallel("trim_sample", samples) return samples
[ "def", "rnaseq_prep_samples", "(", "config", ",", "run_info_yaml", ",", "parallel", ",", "dirs", ",", "samples", ")", ":", "pipeline", "=", "dd", ".", "get_in_samples", "(", "samples", ",", "dd", ".", "get_analysis", ")", "trim_reads_set", "=", "any", "(", ...
Organizes RNA-seq and small RNA-seq samples, converting from BAM and trimming adapters where necessary.
[ "organizes", "RNA", "-", "seq", "and", "small", "-", "RNAseq", "samples", "converting", "from", "BAM", "if", "necessary", "and", "trimming", "if", "necessary" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L394-L418
223,060
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
_pair_samples_with_pipelines
def _pair_samples_with_pipelines(run_info_yaml, config): """Map samples defined in input file to pipelines to run. """ samples = config_utils.load_config(run_info_yaml) if isinstance(samples, dict): resources = samples.pop("resources") samples = samples["details"] else: resources = {} ready_samples = [] for sample in samples: if "files" in sample: del sample["files"] # add any resources to this item to recalculate global configuration usample = copy.deepcopy(sample) usample.pop("algorithm", None) if "resources" not in usample: usample["resources"] = {} for prog, pkvs in resources.items(): if prog not in usample["resources"]: usample["resources"][prog] = {} if pkvs is not None: for key, val in pkvs.items(): usample["resources"][prog][key] = val config = config_utils.update_w_custom(config, usample) sample["resources"] = {} ready_samples.append(sample) paired = [(x, _get_pipeline(x)) for x in ready_samples] d = defaultdict(list) for x in paired: d[x[1]].append([x[0]]) return d, config
python
def _pair_samples_with_pipelines(run_info_yaml, config): """Map samples defined in input file to pipelines to run. """ samples = config_utils.load_config(run_info_yaml) if isinstance(samples, dict): resources = samples.pop("resources") samples = samples["details"] else: resources = {} ready_samples = [] for sample in samples: if "files" in sample: del sample["files"] # add any resources to this item to recalculate global configuration usample = copy.deepcopy(sample) usample.pop("algorithm", None) if "resources" not in usample: usample["resources"] = {} for prog, pkvs in resources.items(): if prog not in usample["resources"]: usample["resources"][prog] = {} if pkvs is not None: for key, val in pkvs.items(): usample["resources"][prog][key] = val config = config_utils.update_w_custom(config, usample) sample["resources"] = {} ready_samples.append(sample) paired = [(x, _get_pipeline(x)) for x in ready_samples] d = defaultdict(list) for x in paired: d[x[1]].append([x[0]]) return d, config
[ "def", "_pair_samples_with_pipelines", "(", "run_info_yaml", ",", "config", ")", ":", "samples", "=", "config_utils", ".", "load_config", "(", "run_info_yaml", ")", "if", "isinstance", "(", "samples", ",", "dict", ")", ":", "resources", "=", "samples", ".", "p...
Map samples defined in input file to pipelines to run.
[ "Map", "samples", "defined", "in", "input", "file", "to", "pipelines", "to", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L430-L461
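The last step of _pair_samples_with_pipelines is a plain defaultdict grouping of samples by pipeline. A stripped-down sketch of that step follows; _get_pipeline here is a stand-in for bcbio's real lookup and simply routes on an "analysis" field, and the sample dictionaries are invented.

from collections import defaultdict

def _get_pipeline(sample):
    # Stand-in for bcbio's pipeline lookup: route on the analysis field.
    return sample.get("analysis", "variant2")

samples = [{"description": "s1", "analysis": "variant2"},
           {"description": "s2", "analysis": "RNA-seq"},
           {"description": "s3", "analysis": "variant2"}]

by_pipeline = defaultdict(list)
for sample in samples:
    by_pipeline[_get_pipeline(sample)].append([sample])

for pipeline, items in by_pipeline.items():
    print(pipeline, [x[0]["description"] for x in items])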
223,061
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
run
def run(data): """HLA typing with bwakit, parsing output from called genotype files. """ bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem"))) hla_fqs = tz.get_in(["hla", "fastq"], data, []) if len(hla_fqs) > 0: hla_base = os.path.commonprefix(hla_fqs) while hla_base.endswith("."): hla_base = hla_base[:-1] out_file = hla_base + ".top" if not utils.file_exists(out_file): cmd = "{bwakit_dir}/run-HLA {hla_base}" do.run(cmd.format(**locals()), "HLA typing with bwakit") out_file = _organize_calls(out_file, hla_base, data) data["hla"].update({"call_file": out_file, "hlacaller": "bwakit"}) return data
python
def run(data): """HLA typing with bwakit, parsing output from called genotype files. """ bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem"))) hla_fqs = tz.get_in(["hla", "fastq"], data, []) if len(hla_fqs) > 0: hla_base = os.path.commonprefix(hla_fqs) while hla_base.endswith("."): hla_base = hla_base[:-1] out_file = hla_base + ".top" if not utils.file_exists(out_file): cmd = "{bwakit_dir}/run-HLA {hla_base}" do.run(cmd.format(**locals()), "HLA typing with bwakit") out_file = _organize_calls(out_file, hla_base, data) data["hla"].update({"call_file": out_file, "hlacaller": "bwakit"}) return data
[ "def", "run", "(", "data", ")", ":", "bwakit_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "utils", ".", "which", "(", "\"run-bwamem\"", ")", ")", ")", "hla_fqs", "=", "tz", ".", "get_in", "(", "[", "...
HLA typing with bwakit, parsing output from called genotype files.
[ "HLA", "typing", "with", "bwakit", "parsing", "output", "from", "called", "genotype", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L18-L34
223,062
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
_organize_calls
def _organize_calls(out_file, hla_base, data): """Prepare genotype calls, reporting best call along with quality metrics. """ hla_truth = get_hla_truthset(data) sample = dd.get_sample_name(data) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["sample", "locus", "mismatches", "options", "alleles", "p-groups", "expected", "validates"]) for genotype_file in glob.glob("%s.HLA-*.gt" % (hla_base)): hla_locus = os.path.basename(genotype_file).replace( "%s.HLA-" % os.path.basename(hla_base), "").replace(".gt", "") with open(genotype_file) as in_handle: total_options = set([]) for i, line in enumerate(in_handle): _, aone, atwo, m = line.split("\t")[:4] pgroups = (hla_groups.hla_protein(aone, data), hla_groups.hla_protein(atwo, data)) if i == 0: call_alleles = [aone, atwo] call_pgroups = pgroups mismatches = m total_options.add(pgroups) if len(total_options) > 0: truth_alleles = tz.get_in([sample, hla_locus], hla_truth, []) writer.writerow([sample, hla_locus, mismatches, len(total_options), ";".join(call_alleles), ";".join(call_pgroups), ";".join(truth_alleles), matches_truth(call_alleles, truth_alleles, data)]) return out_file
python
def _organize_calls(out_file, hla_base, data): """Prepare genotype calls, reporting best call along with quality metrics. """ hla_truth = get_hla_truthset(data) sample = dd.get_sample_name(data) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["sample", "locus", "mismatches", "options", "alleles", "p-groups", "expected", "validates"]) for genotype_file in glob.glob("%s.HLA-*.gt" % (hla_base)): hla_locus = os.path.basename(genotype_file).replace( "%s.HLA-" % os.path.basename(hla_base), "").replace(".gt", "") with open(genotype_file) as in_handle: total_options = set([]) for i, line in enumerate(in_handle): _, aone, atwo, m = line.split("\t")[:4] pgroups = (hla_groups.hla_protein(aone, data), hla_groups.hla_protein(atwo, data)) if i == 0: call_alleles = [aone, atwo] call_pgroups = pgroups mismatches = m total_options.add(pgroups) if len(total_options) > 0: truth_alleles = tz.get_in([sample, hla_locus], hla_truth, []) writer.writerow([sample, hla_locus, mismatches, len(total_options), ";".join(call_alleles), ";".join(call_pgroups), ";".join(truth_alleles), matches_truth(call_alleles, truth_alleles, data)]) return out_file
[ "def", "_organize_calls", "(", "out_file", ",", "hla_base", ",", "data", ")", ":", "hla_truth", "=", "get_hla_truthset", "(", "data", ")", "sample", "=", "dd", ".", "get_sample_name", "(", "data", ")", "with", "file_transaction", "(", "data", ",", "out_file"...
Prepare genotype calls, reporting best call along with quality metrics.
[ "Prepare", "genotype", "calls", "reporting", "best", "call", "along", "with", "quality", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L36-L64
223,063
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
matches_truth
def matches_truth(call_alleles, truth_alleles, data): """Flexibly check if truth and call alleles match, using p-groups. """ if not truth_alleles: return "" else: def _remove_p(x): return x[:-1] if x.endswith("P") else x t_cmp = set([_remove_p(hla_groups.hla_protein(x, data)) for x in truth_alleles]) c_cmp = set([_remove_p(hla_groups.hla_protein(x, data)) for x in call_alleles]) return "yes" if len(t_cmp.intersection(c_cmp)) == len(t_cmp) else "no"
python
def matches_truth(call_alleles, truth_alleles, data):
    """Flexibly check if truth and call alleles match, using p-groups.
    """
    if not truth_alleles:
        return ""
    else:
        def _remove_p(x):
            return x[:-1] if x.endswith("P") else x
        t_cmp = set([_remove_p(hla_groups.hla_protein(x, data)) for x in truth_alleles])
        c_cmp = set([_remove_p(hla_groups.hla_protein(x, data)) for x in call_alleles])
        return "yes" if len(t_cmp.intersection(c_cmp)) == len(t_cmp) else "no"
[ "def", "matches_truth", "(", "call_alleles", ",", "truth_alleles", ",", "data", ")", ":", "if", "not", "truth_alleles", ":", "return", "\"\"", "else", ":", "def", "_remove_p", "(", "x", ")", ":", "return", "x", "[", ":", "-", "1", "]", "if", "x", "."...
Flexibly check if truth and call alleles match, using p-groups.
[ "Flexibly", "check", "if", "truth", "and", "call", "alleles", "match", "using", "p", "-", "groups", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L66-L76
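Setting aside the hla_groups.hla_protein lookup, the comparison in matches_truth reduces to set logic over allele strings with a trailing "P" stripped. A self-contained sketch of that core logic (the allele names are made up, and the protein-group mapping is intentionally skipped):

def _remove_p(allele):
    return allele[:-1] if allele.endswith("P") else allele

def matches_truth_simple(call_alleles, truth_alleles):
    # An empty truth set means there is nothing to validate against.
    if not truth_alleles:
        return ""
    t_cmp = {_remove_p(x) for x in truth_alleles}
    c_cmp = {_remove_p(x) for x in call_alleles}
    return "yes" if len(t_cmp & c_cmp) == len(t_cmp) else "no"

print(matches_truth_simple(["A*01:01P", "A*02:01"], ["A*01:01", "A*02:01P"]))  # yes
print(matches_truth_simple(["A*01:01"], ["A*03:01"]))                          # no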
223,064
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
get_hla_truthset
def get_hla_truthset(data): """Retrieve expected truth calls for annotating HLA called output. """ val_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data) out = {} if val_csv and utils.file_exists(val_csv): with open(val_csv) as in_handle: reader = csv.reader(in_handle) next(reader) # header for sample, locus, alleles in (l for l in reader if l): out = tz.update_in(out, [sample, locus], lambda x: [x.strip() for x in alleles.split(";")]) return out
python
def get_hla_truthset(data):
    """Retrieve expected truth calls for annotating HLA called output.
    """
    val_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data)
    out = {}
    if val_csv and utils.file_exists(val_csv):
        with open(val_csv) as in_handle:
            reader = csv.reader(in_handle)
            next(reader)  # header
            for sample, locus, alleles in (l for l in reader if l):
                out = tz.update_in(out, [sample, locus], lambda x: [x.strip() for x in alleles.split(";")])
    return out
[ "def", "get_hla_truthset", "(", "data", ")", ":", "val_csv", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"hlavalidate\"", "]", ",", "data", ")", "out", "=", "{", "}", "if", "val_csv", "and", "utils", ".", "file_exists",...
Retrieve expected truth calls for annotating HLA called output.
[ "Retrieve", "expected", "truth", "calls", "for", "annotating", "HLA", "called", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L78-L89
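The validation file read by get_hla_truthset is a three-column CSV of sample, locus, and semicolon-separated alleles. Without the toolz dependency the same nested dictionary can be assembled with dict.setdefault, as in this sketch; the example rows are invented.

import csv
import io

example = ("sample,locus,alleles\n"
           "NA12878,HLA-A,A*01:01;A*11:01\n"
           "NA12878,HLA-B,B*08:01;B*56:01\n")

truthset = {}
reader = csv.reader(io.StringIO(example))
next(reader)  # skip header
for sample, locus, alleles in (row for row in reader if row):
    truthset.setdefault(sample, {})[locus] = [a.strip() for a in alleles.split(";")]

print(truthset["NA12878"]["HLA-A"])  # ['A*01:01', 'A*11:01']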
223,065
bcbio/bcbio-nextgen
scripts/utils/bam_to_fastq_region.py
bam_to_fastq_pair
def bam_to_fastq_pair(in_file, target_region, pair): """Generator to convert BAM files into name, seq, qual in a region. """ space, start, end = target_region bam_file = pysam.Samfile(in_file, "rb") for read in bam_file: if (not read.is_unmapped and not read.mate_is_unmapped and bam_file.getrname(read.tid) == space and bam_file.getrname(read.mrnm) == space and read.pos >= start and read.pos <= end and read.mpos >= start and read.mpos <= end and not read.is_secondary and read.is_paired and getattr(read, "is_read%s" % pair)): seq = Seq.Seq(read.seq) qual = list(read.qual) if read.is_reverse: seq = seq.reverse_complement() qual.reverse() yield read.qname, str(seq), "".join(qual)
python
def bam_to_fastq_pair(in_file, target_region, pair): """Generator to convert BAM files into name, seq, qual in a region. """ space, start, end = target_region bam_file = pysam.Samfile(in_file, "rb") for read in bam_file: if (not read.is_unmapped and not read.mate_is_unmapped and bam_file.getrname(read.tid) == space and bam_file.getrname(read.mrnm) == space and read.pos >= start and read.pos <= end and read.mpos >= start and read.mpos <= end and not read.is_secondary and read.is_paired and getattr(read, "is_read%s" % pair)): seq = Seq.Seq(read.seq) qual = list(read.qual) if read.is_reverse: seq = seq.reverse_complement() qual.reverse() yield read.qname, str(seq), "".join(qual)
[ "def", "bam_to_fastq_pair", "(", "in_file", ",", "target_region", ",", "pair", ")", ":", "space", ",", "start", ",", "end", "=", "target_region", "bam_file", "=", "pysam", ".", "Samfile", "(", "in_file", ",", "\"rb\"", ")", "for", "read", "in", "bam_file",...
Generator to convert BAM files into name, seq, qual in a region.
[ "Generator", "to", "convert", "BAM", "files", "into", "name", "seq", "qual", "in", "a", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/bam_to_fastq_region.py#L32-L50
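bam_to_fastq_pair yields (name, sequence, quality) tuples; a typical consumer serializes those as four-line FASTQ records. A minimal sketch of such a consumer, fed a hard-coded tuple instead of a real BAM, might look like this (the helper name and example read are assumptions, not part of the original script):

import sys

def write_fastq(records, out_handle):
    # records: iterable of (name, seq, qual) tuples, as yielded by the generator above.
    for name, seq, qual in records:
        out_handle.write("@%s\n%s\n+\n%s\n" % (name, seq, qual))

write_fastq([("read1/1", "ACGTACGT", "IIIIIIII")], sys.stdout)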
223,066
bcbio/bcbio-nextgen
bcbio/bam/callable.py
sample_callable_bed
def sample_callable_bed(bam_file, ref_file, data): """Retrieve callable regions for a sample subset by defined analysis regions. """ from bcbio.heterogeneity import chromhacks CovInfo = collections.namedtuple("CovInfo", "callable, raw_callable, depth_files") noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data) def callable_chrom_filter(r): """Filter to callable region, potentially limiting by chromosomes. """ return r.name == "CALLABLE" and (not noalt_calling or chromhacks.is_nonalt(r.chrom)) out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0] with shared.bedtools_tmpdir(data): sv_bed = regions.get_sv_bed(data) callable_bed, depth_files = coverage.calculate(bam_file, data, sv_bed) input_regions_bed = dd.get_variant_regions(data) if not utils.file_uptodate(out_file, callable_bed): with file_transaction(data, out_file) as tx_out_file: callable_regions = pybedtools.BedTool(callable_bed) filter_regions = callable_regions.filter(callable_chrom_filter) if input_regions_bed: if not utils.file_uptodate(out_file, input_regions_bed): input_regions = pybedtools.BedTool(input_regions_bed) filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file) else: filter_regions.saveas(tx_out_file) return CovInfo(out_file, callable_bed, depth_files)
python
def sample_callable_bed(bam_file, ref_file, data): """Retrieve callable regions for a sample subset by defined analysis regions. """ from bcbio.heterogeneity import chromhacks CovInfo = collections.namedtuple("CovInfo", "callable, raw_callable, depth_files") noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data) def callable_chrom_filter(r): """Filter to callable region, potentially limiting by chromosomes. """ return r.name == "CALLABLE" and (not noalt_calling or chromhacks.is_nonalt(r.chrom)) out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0] with shared.bedtools_tmpdir(data): sv_bed = regions.get_sv_bed(data) callable_bed, depth_files = coverage.calculate(bam_file, data, sv_bed) input_regions_bed = dd.get_variant_regions(data) if not utils.file_uptodate(out_file, callable_bed): with file_transaction(data, out_file) as tx_out_file: callable_regions = pybedtools.BedTool(callable_bed) filter_regions = callable_regions.filter(callable_chrom_filter) if input_regions_bed: if not utils.file_uptodate(out_file, input_regions_bed): input_regions = pybedtools.BedTool(input_regions_bed) filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file) else: filter_regions.saveas(tx_out_file) return CovInfo(out_file, callable_bed, depth_files)
[ "def", "sample_callable_bed", "(", "bam_file", ",", "ref_file", ",", "data", ")", ":", "from", "bcbio", ".", "heterogeneity", "import", "chromhacks", "CovInfo", "=", "collections", ".", "namedtuple", "(", "\"CovInfo\"", ",", "\"callable, raw_callable, depth_files\"", ...
Retrieve callable regions for a sample subset by defined analysis regions.
[ "Retrieve", "callable", "regions", "for", "a", "sample", "subset", "by", "defined", "analysis", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L32-L57
223,067
bcbio/bcbio-nextgen
bcbio/bam/callable.py
get_ref_bedtool
def get_ref_bedtool(ref_file, config, chrom=None): """Retrieve a pybedtool BedTool object with reference sizes from input reference. """ broad_runner = broad.runner_from_path("picard", config) ref_dict = broad_runner.run_fn("picard_index_ref", ref_file) ref_lines = [] with pysam.Samfile(ref_dict, "r") as ref_sam: for sq in ref_sam.header["SQ"]: if not chrom or sq["SN"] == chrom: ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"])) return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
python
def get_ref_bedtool(ref_file, config, chrom=None):
    """Retrieve a pybedtool BedTool object with reference sizes from input reference.
    """
    broad_runner = broad.runner_from_path("picard", config)
    ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
    ref_lines = []
    with pysam.Samfile(ref_dict, "r") as ref_sam:
        for sq in ref_sam.header["SQ"]:
            if not chrom or sq["SN"] == chrom:
                ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
    return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
[ "def", "get_ref_bedtool", "(", "ref_file", ",", "config", ",", "chrom", "=", "None", ")", ":", "broad_runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "config", ")", "ref_dict", "=", "broad_runner", ".", "run_fn", "(", "\"picard_index_re...
Retrieve a pybedtool BedTool object with reference sizes from input reference.
[ "Retrieve", "a", "pybedtool", "BedTool", "object", "with", "reference", "sizes", "from", "input", "reference", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L59-L69
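get_ref_bedtool reads contig names and lengths from a Picard sequence dictionary via pysam. The same chrom/0/length lines can be produced by parsing the @SQ records of a .dict file as plain text, which the sketch below illustrates; the contig names and lengths are examples, and the parsing assumes the standard SN:/LN: tag layout.

import io

dict_text = ("@HD\tVN:1.6\n"
             "@SQ\tSN:chr1\tLN:248956422\n"
             "@SQ\tSN:chr2\tLN:242193529\n")

bed_lines = []
for line in io.StringIO(dict_text):
    if line.startswith("@SQ"):
        tags = dict(field.split(":", 1) for field in line.rstrip().split("\t")[1:])
        bed_lines.append("%s\t0\t%s" % (tags["SN"], tags["LN"]))

print("\n".join(bed_lines))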
223,068
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_get_nblock_regions
def _get_nblock_regions(in_file, min_n_size, ref_regions): """Retrieve coordinates of regions in reference genome with no mapping. These are potential breakpoints for parallelizing analysis. """ out_lines = [] called_contigs = set([]) with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: contig, start, end, ctype = line.rstrip().split() called_contigs.add(contig) if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and int(end) - int(start) > min_n_size): out_lines.append("%s\t%s\t%s\n" % (contig, start, end)) for refr in ref_regions: if refr.chrom not in called_contigs: out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop)) return pybedtools.BedTool("\n".join(out_lines), from_string=True)
python
def _get_nblock_regions(in_file, min_n_size, ref_regions):
    """Retrieve coordinates of regions in reference genome with no mapping.

    These are potential breakpoints for parallelizing analysis.
    """
    out_lines = []
    called_contigs = set([])
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            contig, start, end, ctype = line.rstrip().split()
            called_contigs.add(contig)
            if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"]
                    and int(end) - int(start) > min_n_size):
                out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
    for refr in ref_regions:
        if refr.chrom not in called_contigs:
            out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
    return pybedtools.BedTool("\n".join(out_lines), from_string=True)
[ "def", "_get_nblock_regions", "(", "in_file", ",", "min_n_size", ",", "ref_regions", ")", ":", "out_lines", "=", "[", "]", "called_contigs", "=", "set", "(", "[", "]", ")", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":",...
Retrieve coordinates of regions in reference genome with no mapping. These are potential breakpoints for parallelizing analysis.
[ "Retrieve", "coordinates", "of", "regions", "in", "reference", "genome", "with", "no", "mapping", ".", "These", "are", "potential", "breakpoints", "for", "parallelizing", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L71-L87
223,069
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_combine_regions
def _combine_regions(all_regions, ref_regions): """Combine multiple BEDtools regions of regions into sorted final BEDtool. """ chrom_order = {} for i, x in enumerate(ref_regions): chrom_order[x.chrom] = i def wchrom_key(x): chrom, start, end = x return (chrom_order[chrom], start, end) all_intervals = [] for region_group in all_regions: for region in region_group: all_intervals.append((region.chrom, int(region.start), int(region.stop))) all_intervals.sort(key=wchrom_key) bed_lines = ["%s\t%s\t%s" % (c, s, e) for (c, s, e) in all_intervals] return pybedtools.BedTool("\n".join(bed_lines), from_string=True)
python
def _combine_regions(all_regions, ref_regions):
    """Combine multiple BEDtools regions of regions into sorted final BEDtool.
    """
    chrom_order = {}
    for i, x in enumerate(ref_regions):
        chrom_order[x.chrom] = i
    def wchrom_key(x):
        chrom, start, end = x
        return (chrom_order[chrom], start, end)
    all_intervals = []
    for region_group in all_regions:
        for region in region_group:
            all_intervals.append((region.chrom, int(region.start), int(region.stop)))
    all_intervals.sort(key=wchrom_key)
    bed_lines = ["%s\t%s\t%s" % (c, s, e) for (c, s, e) in all_intervals]
    return pybedtools.BedTool("\n".join(bed_lines), from_string=True)
[ "def", "_combine_regions", "(", "all_regions", ",", "ref_regions", ")", ":", "chrom_order", "=", "{", "}", "for", "i", ",", "x", "in", "enumerate", "(", "ref_regions", ")", ":", "chrom_order", "[", "x", ".", "chrom", "]", "=", "i", "def", "wchrom_key", ...
Combine multiple BEDtools regions of regions into sorted final BEDtool.
[ "Combine", "multiple", "BEDtools", "regions", "of", "regions", "into", "sorted", "final", "BEDtool", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L89-L104
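The sort in _combine_regions orders intervals by the reference's own contig order rather than alphabetically, which keeps chr10 after chr2. A standalone illustration of that sort key (the contig list and intervals are synthetic):

ref_order = ["chr1", "chr2", "chr10"]  # contig order as given by the reference index
chrom_rank = {chrom: i for i, chrom in enumerate(ref_order)}

intervals = [("chr10", 5, 50), ("chr1", 100, 200), ("chr2", 1, 10), ("chr1", 5, 20)]
intervals.sort(key=lambda iv: (chrom_rank[iv[0]], iv[1], iv[2]))
print(intervals)  # chr1 intervals first, then chr2, then chr10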
223,070
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_add_config_regions
def _add_config_regions(nblock_regions, ref_regions, data): """Add additional nblock regions based on configured regions to call. Identifies user defined regions which we should not be analyzing. """ input_regions_bed = dd.get_variant_regions(data) if input_regions_bed: input_regions = pybedtools.BedTool(input_regions_bed) # work around problem with single region not subtracted correctly. if len(input_regions) == 1: str_regions = str(input_regions[0]).strip() input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions), from_string=True) input_nblock = ref_regions.subtract(input_regions, nonamecheck=True) if input_nblock == ref_regions: raise ValueError("Input variant_region file (%s) " "excludes all genomic regions. Do the chromosome names " "in the BED file match your genome (chr1 vs 1)?" % input_regions_bed) all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions) else: all_intervals = nblock_regions if "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data): from bcbio.heterogeneity import chromhacks remove_intervals = ref_regions.filter(lambda r: not chromhacks.is_nonalt(r.chrom)) all_intervals = _combine_regions([all_intervals, remove_intervals], ref_regions) return all_intervals.merge()
python
def _add_config_regions(nblock_regions, ref_regions, data): """Add additional nblock regions based on configured regions to call. Identifies user defined regions which we should not be analyzing. """ input_regions_bed = dd.get_variant_regions(data) if input_regions_bed: input_regions = pybedtools.BedTool(input_regions_bed) # work around problem with single region not subtracted correctly. if len(input_regions) == 1: str_regions = str(input_regions[0]).strip() input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions), from_string=True) input_nblock = ref_regions.subtract(input_regions, nonamecheck=True) if input_nblock == ref_regions: raise ValueError("Input variant_region file (%s) " "excludes all genomic regions. Do the chromosome names " "in the BED file match your genome (chr1 vs 1)?" % input_regions_bed) all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions) else: all_intervals = nblock_regions if "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data): from bcbio.heterogeneity import chromhacks remove_intervals = ref_regions.filter(lambda r: not chromhacks.is_nonalt(r.chrom)) all_intervals = _combine_regions([all_intervals, remove_intervals], ref_regions) return all_intervals.merge()
[ "def", "_add_config_regions", "(", "nblock_regions", ",", "ref_regions", ",", "data", ")", ":", "input_regions_bed", "=", "dd", ".", "get_variant_regions", "(", "data", ")", "if", "input_regions_bed", ":", "input_regions", "=", "pybedtools", ".", "BedTool", "(", ...
Add additional nblock regions based on configured regions to call. Identifies user defined regions which we should not be analyzing.
[ "Add", "additional", "nblock", "regions", "based", "on", "configured", "regions", "to", "call", ".", "Identifies", "user", "defined", "regions", "which", "we", "should", "not", "be", "analyzing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L106-L130
223,071
bcbio/bcbio-nextgen
bcbio/bam/callable.py
block_regions
def block_regions(callable_bed, in_bam, ref_file, data): """Find blocks of regions for analysis from mapped input BAM file. Identifies islands of callable regions, surrounding by regions with no read support, that can be analyzed independently. """ min_n_size = int(data["config"]["algorithm"].get("nomap_split_size", 250)) with shared.bedtools_tmpdir(data): nblock_bed = "%s-nblocks.bed" % utils.splitext_plus(callable_bed)[0] callblock_bed = "%s-callableblocks.bed" % utils.splitext_plus(callable_bed)[0] if not utils.file_uptodate(nblock_bed, callable_bed): ref_regions = get_ref_bedtool(ref_file, data["config"]) nblock_regions = _get_nblock_regions(callable_bed, min_n_size, ref_regions) nblock_regions = _add_config_regions(nblock_regions, ref_regions, data) with file_transaction(data, nblock_bed, callblock_bed) as (tx_nblock_bed, tx_callblock_bed): nblock_regions.filter(lambda r: len(r) > min_n_size).saveas(tx_nblock_bed) if len(ref_regions.subtract(nblock_regions, nonamecheck=True)) > 0: ref_regions.subtract(tx_nblock_bed, nonamecheck=True).merge(d=min_n_size).saveas(tx_callblock_bed) else: raise ValueError("No callable regions found in %s from BAM file %s. Some causes:\n " " - Alignment regions do not overlap with regions found " "in your `variant_regions` BED: %s\n" " - There are no aligned reads in your BAM file that pass sanity checks " " (mapping score > 1, non-duplicates, both ends of paired reads mapped)" % (dd.get_sample_name(data), in_bam, dd.get_variant_regions(data))) return callblock_bed, nblock_bed
python
def block_regions(callable_bed, in_bam, ref_file, data): """Find blocks of regions for analysis from mapped input BAM file. Identifies islands of callable regions, surrounding by regions with no read support, that can be analyzed independently. """ min_n_size = int(data["config"]["algorithm"].get("nomap_split_size", 250)) with shared.bedtools_tmpdir(data): nblock_bed = "%s-nblocks.bed" % utils.splitext_plus(callable_bed)[0] callblock_bed = "%s-callableblocks.bed" % utils.splitext_plus(callable_bed)[0] if not utils.file_uptodate(nblock_bed, callable_bed): ref_regions = get_ref_bedtool(ref_file, data["config"]) nblock_regions = _get_nblock_regions(callable_bed, min_n_size, ref_regions) nblock_regions = _add_config_regions(nblock_regions, ref_regions, data) with file_transaction(data, nblock_bed, callblock_bed) as (tx_nblock_bed, tx_callblock_bed): nblock_regions.filter(lambda r: len(r) > min_n_size).saveas(tx_nblock_bed) if len(ref_regions.subtract(nblock_regions, nonamecheck=True)) > 0: ref_regions.subtract(tx_nblock_bed, nonamecheck=True).merge(d=min_n_size).saveas(tx_callblock_bed) else: raise ValueError("No callable regions found in %s from BAM file %s. Some causes:\n " " - Alignment regions do not overlap with regions found " "in your `variant_regions` BED: %s\n" " - There are no aligned reads in your BAM file that pass sanity checks " " (mapping score > 1, non-duplicates, both ends of paired reads mapped)" % (dd.get_sample_name(data), in_bam, dd.get_variant_regions(data))) return callblock_bed, nblock_bed
[ "def", "block_regions", "(", "callable_bed", ",", "in_bam", ",", "ref_file", ",", "data", ")", ":", "min_n_size", "=", "int", "(", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"nomap_split_size\"", ",", "250", ")", ")", "...
Find blocks of regions for analysis from mapped input BAM file. Identifies islands of callable regions, surrounded by regions with no read support, that can be analyzed independently.
[ "Find", "blocks", "of", "regions", "for", "analysis", "from", "mapped", "input", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L182-L207
223,072
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_analysis_block_stats
def _analysis_block_stats(regions, samples): """Provide statistics on sizes and number of analysis blocks. """ prev = None between_sizes = [] region_sizes = [] for region in regions: if prev and prev.chrom == region.chrom: between_sizes.append(region.start - prev.end) region_sizes.append(region.end - region.start) prev = region def descriptive_stats(xs): if len(xs) < 2: return xs parts = ["min: %s" % min(xs), "5%%: %s" % numpy.percentile(xs, 5), "25%%: %s" % numpy.percentile(xs, 25), "median: %s" % numpy.percentile(xs, 50), "75%%: %s" % numpy.percentile(xs, 75), "95%%: %s" % numpy.percentile(xs, 95), "99%%: %s" % numpy.percentile(xs, 99), "max: %s" % max(xs)] return "\n".join([" " + x for x in parts]) logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) + "Block sizes:\n%s\n" % descriptive_stats(region_sizes) + "Between block sizes:\n%s\n" % descriptive_stats(between_sizes)) if len(region_sizes) == 0: raise ValueError("No callable regions found in: %s" % (", ".join([dd.get_sample_name(x) for x in samples])))
python
def _analysis_block_stats(regions, samples): """Provide statistics on sizes and number of analysis blocks. """ prev = None between_sizes = [] region_sizes = [] for region in regions: if prev and prev.chrom == region.chrom: between_sizes.append(region.start - prev.end) region_sizes.append(region.end - region.start) prev = region def descriptive_stats(xs): if len(xs) < 2: return xs parts = ["min: %s" % min(xs), "5%%: %s" % numpy.percentile(xs, 5), "25%%: %s" % numpy.percentile(xs, 25), "median: %s" % numpy.percentile(xs, 50), "75%%: %s" % numpy.percentile(xs, 75), "95%%: %s" % numpy.percentile(xs, 95), "99%%: %s" % numpy.percentile(xs, 99), "max: %s" % max(xs)] return "\n".join([" " + x for x in parts]) logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) + "Block sizes:\n%s\n" % descriptive_stats(region_sizes) + "Between block sizes:\n%s\n" % descriptive_stats(between_sizes)) if len(region_sizes) == 0: raise ValueError("No callable regions found in: %s" % (", ".join([dd.get_sample_name(x) for x in samples])))
[ "def", "_analysis_block_stats", "(", "regions", ",", "samples", ")", ":", "prev", "=", "None", "between_sizes", "=", "[", "]", "region_sizes", "=", "[", "]", "for", "region", "in", "regions", ":", "if", "prev", "and", "prev", ".", "chrom", "==", "region"...
Provide statistics on sizes and number of analysis blocks.
[ "Provide", "statistics", "on", "sizes", "and", "number", "of", "analysis", "blocks", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L216-L244
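The descriptive_stats helper above is essentially numpy.percentile applied to the block sizes. Run on its own with synthetic sizes, a trimmed-down version looks like this:

import numpy

def descriptive_stats(xs):
    if len(xs) < 2:
        return xs
    parts = [("min", min(xs)), ("5%", numpy.percentile(xs, 5)),
             ("median", numpy.percentile(xs, 50)),
             ("95%", numpy.percentile(xs, 95)), ("max", max(xs))]
    return "\n".join("  %s: %s" % (name, value) for name, value in parts)

block_sizes = [1200, 50000, 250000, 900, 78000, 3200]
print(descriptive_stats(block_sizes))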
223,073
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_needs_region_update
def _needs_region_update(out_file, samples): """Check if we need to update BED file of regions, supporting back compatibility. """ nblock_files = [x["regions"]["nblock"] for x in samples if "regions" in x] # For older approaches and do not create a new set of analysis # regions, since the new algorithm will re-do all BAM and variant # steps with new regions for nblock_file in nblock_files: test_old = nblock_file.replace("-nblocks", "-analysisblocks") if os.path.exists(test_old): return False # Check if any of the local files have changed so we need to refresh for noblock_file in nblock_files: if not utils.file_uptodate(out_file, noblock_file): return True return False
python
def _needs_region_update(out_file, samples): """Check if we need to update BED file of regions, supporting back compatibility. """ nblock_files = [x["regions"]["nblock"] for x in samples if "regions" in x] # For older approaches and do not create a new set of analysis # regions, since the new algorithm will re-do all BAM and variant # steps with new regions for nblock_file in nblock_files: test_old = nblock_file.replace("-nblocks", "-analysisblocks") if os.path.exists(test_old): return False # Check if any of the local files have changed so we need to refresh for noblock_file in nblock_files: if not utils.file_uptodate(out_file, noblock_file): return True return False
[ "def", "_needs_region_update", "(", "out_file", ",", "samples", ")", ":", "nblock_files", "=", "[", "x", "[", "\"regions\"", "]", "[", "\"nblock\"", "]", "for", "x", "in", "samples", "if", "\"regions\"", "in", "x", "]", "# For older approaches and do not create ...
Check if we need to update BED file of regions, supporting back compatibility.
[ "Check", "if", "we", "need", "to", "update", "BED", "file", "of", "regions", "supporting", "back", "compatibility", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L246-L261
223,074
bcbio/bcbio-nextgen
bcbio/bam/callable.py
combine_sample_regions
def combine_sample_regions(*samples): """Create batch-level sets of callable regions for multi-sample calling. Intersects all non-callable (nblock) regions from all samples in a batch, producing a global set of callable regions. """ samples = utils.unpack_worlds(samples) samples = cwlutils.unpack_tarballs(samples, samples[0]) # back compatibility -- global file for entire sample set global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed") if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples): global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed") else: global_analysis_file = None out = [] analysis_files = [] batches = [] with shared.bedtools_tmpdir(samples[0]): for batch, items in vmulti.group_by_batch(samples, require_bam=False).items(): batches.append(items) if global_analysis_file: analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file else: analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items) for data in items: vr_file = dd.get_variant_regions(data) if analysis_file: analysis_files.append(analysis_file) data["config"]["algorithm"]["callable_regions"] = analysis_file data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count() elif vr_file: data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count() # attach a representative sample for calculating callable region if not data.get("work_bam"): for x in items: if x.get("work_bam"): data["work_bam_callable"] = x["work_bam"] out.append([data]) # Ensure output order matches input order, consistency for CWL-based runs assert len(out) == len(samples) sample_indexes = {dd.get_sample_name(d): i for i, d in enumerate(samples)} def by_input_index(xs): return sample_indexes[dd.get_sample_name(xs[0])] out.sort(key=by_input_index) if len(analysis_files) > 0: final_regions = pybedtools.BedTool(analysis_files[0]) _analysis_block_stats(final_regions, batches[0]) return out
python
def combine_sample_regions(*samples): """Create batch-level sets of callable regions for multi-sample calling. Intersects all non-callable (nblock) regions from all samples in a batch, producing a global set of callable regions. """ samples = utils.unpack_worlds(samples) samples = cwlutils.unpack_tarballs(samples, samples[0]) # back compatibility -- global file for entire sample set global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed") if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples): global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed") else: global_analysis_file = None out = [] analysis_files = [] batches = [] with shared.bedtools_tmpdir(samples[0]): for batch, items in vmulti.group_by_batch(samples, require_bam=False).items(): batches.append(items) if global_analysis_file: analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file else: analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items) for data in items: vr_file = dd.get_variant_regions(data) if analysis_file: analysis_files.append(analysis_file) data["config"]["algorithm"]["callable_regions"] = analysis_file data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count() elif vr_file: data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count() # attach a representative sample for calculating callable region if not data.get("work_bam"): for x in items: if x.get("work_bam"): data["work_bam_callable"] = x["work_bam"] out.append([data]) # Ensure output order matches input order, consistency for CWL-based runs assert len(out) == len(samples) sample_indexes = {dd.get_sample_name(d): i for i, d in enumerate(samples)} def by_input_index(xs): return sample_indexes[dd.get_sample_name(xs[0])] out.sort(key=by_input_index) if len(analysis_files) > 0: final_regions = pybedtools.BedTool(analysis_files[0]) _analysis_block_stats(final_regions, batches[0]) return out
[ "def", "combine_sample_regions", "(", "*", "samples", ")", ":", "samples", "=", "utils", ".", "unpack_worlds", "(", "samples", ")", "samples", "=", "cwlutils", ".", "unpack_tarballs", "(", "samples", ",", "samples", "[", "0", "]", ")", "# back compatibility --...
Create batch-level sets of callable regions for multi-sample calling. Intersects all non-callable (nblock) regions from all samples in a batch, producing a global set of callable regions.
[ "Create", "batch", "-", "level", "sets", "of", "callable", "regions", "for", "multi", "-", "sample", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L263-L311
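combine_sample_regions re-sorts its output to match the incoming sample order, which matters for reproducible CWL runs. The index-map trick it uses is easy to isolate; the sample names below are placeholders and the grouping layout mimics the [[data], [data], ...] structure used above.

input_names = ["s1", "s2", "s3"]
sample_indexes = {name: i for i, name in enumerate(input_names)}

# Output groups arrive in arbitrary (batch-grouped) order.
out = [[{"name": "s3"}], [{"name": "s1"}], [{"name": "s2"}]]
out.sort(key=lambda group: sample_indexes[group[0]["name"]])
print([group[0]["name"] for group in out])  # ['s1', 's2', 's3']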
223,075
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_combine_sample_regions_batch
def _combine_sample_regions_batch(batch, items): """Combine sample regions within a group of batched samples. """ config = items[0]["config"] work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "regions")) analysis_file = os.path.join(work_dir, "%s-analysis_blocks.bed" % batch) no_analysis_file = os.path.join(work_dir, "%s-noanalysis_blocks.bed" % batch) if not utils.file_exists(analysis_file) or _needs_region_update(analysis_file, items): # Combine all nblocks into a final set of intersecting regions # without callable bases. HT @brentp for intersection approach # https://groups.google.com/forum/?fromgroups#!topic/bedtools-discuss/qA9wK4zN8do bed_regions = [pybedtools.BedTool(x["regions"]["nblock"]) for x in items if "regions" in x and x["regions"]["nblock"]] if len(bed_regions) == 0: analysis_file, no_analysis_file = None, None else: with file_transaction(items[0], analysis_file, no_analysis_file) as (tx_afile, tx_noafile): def intersect_two(a, b): return a.intersect(b, nonamecheck=True).saveas() nblock_regions = reduce(intersect_two, bed_regions).saveas( "%s-nblock%s" % utils.splitext_plus(tx_afile)) ref_file = tz.get_in(["reference", "fasta", "base"], items[0]) ref_regions = get_ref_bedtool(ref_file, config) min_n_size = int(config["algorithm"].get("nomap_split_size", 250)) block_filter = NBlockRegionPicker(ref_regions, config, min_n_size) final_nblock_regions = nblock_regions.filter( block_filter.include_block).saveas().each(block_filter.expand_block).saveas( "%s-nblockfinal%s" % utils.splitext_plus(tx_afile)) final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).\ saveas().merge(d=min_n_size) _write_bed_regions(items[0], final_regions, tx_afile, tx_noafile) if analysis_file and utils.file_exists(analysis_file): return analysis_file, no_analysis_file else: return None, None
python
def _combine_sample_regions_batch(batch, items): """Combine sample regions within a group of batched samples. """ config = items[0]["config"] work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "regions")) analysis_file = os.path.join(work_dir, "%s-analysis_blocks.bed" % batch) no_analysis_file = os.path.join(work_dir, "%s-noanalysis_blocks.bed" % batch) if not utils.file_exists(analysis_file) or _needs_region_update(analysis_file, items): # Combine all nblocks into a final set of intersecting regions # without callable bases. HT @brentp for intersection approach # https://groups.google.com/forum/?fromgroups#!topic/bedtools-discuss/qA9wK4zN8do bed_regions = [pybedtools.BedTool(x["regions"]["nblock"]) for x in items if "regions" in x and x["regions"]["nblock"]] if len(bed_regions) == 0: analysis_file, no_analysis_file = None, None else: with file_transaction(items[0], analysis_file, no_analysis_file) as (tx_afile, tx_noafile): def intersect_two(a, b): return a.intersect(b, nonamecheck=True).saveas() nblock_regions = reduce(intersect_two, bed_regions).saveas( "%s-nblock%s" % utils.splitext_plus(tx_afile)) ref_file = tz.get_in(["reference", "fasta", "base"], items[0]) ref_regions = get_ref_bedtool(ref_file, config) min_n_size = int(config["algorithm"].get("nomap_split_size", 250)) block_filter = NBlockRegionPicker(ref_regions, config, min_n_size) final_nblock_regions = nblock_regions.filter( block_filter.include_block).saveas().each(block_filter.expand_block).saveas( "%s-nblockfinal%s" % utils.splitext_plus(tx_afile)) final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).\ saveas().merge(d=min_n_size) _write_bed_regions(items[0], final_regions, tx_afile, tx_noafile) if analysis_file and utils.file_exists(analysis_file): return analysis_file, no_analysis_file else: return None, None
[ "def", "_combine_sample_regions_batch", "(", "batch", ",", "items", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "items", "[", "0", "]"...
Combine sample regions within a group of batched samples.
[ "Combine", "sample", "regions", "within", "a", "group", "of", "batched", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L313-L347
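A minimal sketch of the intersection step used by _combine_sample_regions_batch above, assuming bedtools and pybedtools are installed; the per-sample nblock BED file names are placeholders and this illustrates the core idea rather than the bcbio pipeline itself.

from functools import reduce
import pybedtools

# Hypothetical per-sample no-call (nblock) BED files.
nblock_files = ["sample1-nblock.bed", "sample2-nblock.bed", "sample3-nblock.bed"]
bed_regions = [pybedtools.BedTool(f) for f in nblock_files]

def intersect_two(a, b):
    # Keep only regions uncallable in both inputs; nonamecheck silences
    # bedtools warnings about differing chromosome naming conventions.
    return a.intersect(b, nonamecheck=True).saveas()

# Regions with no callable bases in any sample of the batch.
shared_nblocks = reduce(intersect_two, bed_regions).saveas("batch-nblock.bed")
print(shared_nblocks.fn)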
223,076
bcbio/bcbio-nextgen
bcbio/bam/callable.py
get_split_regions
def get_split_regions(bed_file, data): """Retrieve a set of split regions using the input BED for callable regions. Provides a less inclusive hook for parallelizing over multiple regions. """ out_file = "%s-analysis_blocks.bed" % utils.splitext_plus(bed_file)[0] with shared.bedtools_tmpdir(data): if not utils.file_uptodate(out_file, bed_file): ref_regions = get_ref_bedtool(dd.get_ref_file(data), data["config"]) nblock_regions = ref_regions.subtract(pybedtools.BedTool(bed_file)).saveas() min_n_size = int(tz.get_in(["config", "algorithm", "nomap_split_size"], data, 250)) block_filter = NBlockRegionPicker(ref_regions, data["config"], min_n_size) final_nblock_regions = nblock_regions.filter( block_filter.include_block).saveas().each(block_filter.expand_block).saveas() with file_transaction(data, out_file) as tx_out_file: final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).\ saveas().merge(d=min_n_size).saveas(tx_out_file) chroms = set([]) with shared.bedtools_tmpdir(data): for r in pybedtools.BedTool(bed_file): chroms.add(r.chrom) out = [] for r in pybedtools.BedTool(out_file): if r.chrom in chroms: out.append((r.chrom, r.start, r.stop)) return out
python
def get_split_regions(bed_file, data): """Retrieve a set of split regions using the input BED for callable regions. Provides a less inclusive hook for parallelizing over multiple regions. """ out_file = "%s-analysis_blocks.bed" % utils.splitext_plus(bed_file)[0] with shared.bedtools_tmpdir(data): if not utils.file_uptodate(out_file, bed_file): ref_regions = get_ref_bedtool(dd.get_ref_file(data), data["config"]) nblock_regions = ref_regions.subtract(pybedtools.BedTool(bed_file)).saveas() min_n_size = int(tz.get_in(["config", "algorithm", "nomap_split_size"], data, 250)) block_filter = NBlockRegionPicker(ref_regions, data["config"], min_n_size) final_nblock_regions = nblock_regions.filter( block_filter.include_block).saveas().each(block_filter.expand_block).saveas() with file_transaction(data, out_file) as tx_out_file: final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).\ saveas().merge(d=min_n_size).saveas(tx_out_file) chroms = set([]) with shared.bedtools_tmpdir(data): for r in pybedtools.BedTool(bed_file): chroms.add(r.chrom) out = [] for r in pybedtools.BedTool(out_file): if r.chrom in chroms: out.append((r.chrom, r.start, r.stop)) return out
[ "def", "get_split_regions", "(", "bed_file", ",", "data", ")", ":", "out_file", "=", "\"%s-analysis_blocks.bed\"", "%", "utils", ".", "splitext_plus", "(", "bed_file", ")", "[", "0", "]", "with", "shared", ".", "bedtools_tmpdir", "(", "data", ")", ":", "if",...
Retrieve a set of split regions using the input BED for callable regions. Provides a less inclusive hook for parallelizing over multiple regions.
[ "Retrieve", "a", "set", "of", "split", "regions", "using", "the", "input", "BED", "for", "callable", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L349-L374
223,077
bcbio/bcbio-nextgen
bcbio/bam/callable.py
NBlockRegionPicker.include_block
def include_block(self, x): """Check for inclusion of block based on distance from previous. """ last_pos = self._chr_last_blocks.get(x.chrom, 0) # Region excludes an entire chromosome, typically decoy/haplotypes if last_pos <= self._end_buffer and x.stop >= self._ref_sizes.get(x.chrom, 0) - self._end_buffer: return True # Do not split on smaller decoy and haplotype chromosomes elif self._ref_sizes.get(x.chrom, 0) <= self._target_size: return False elif (x.start - last_pos) > self._target_size: self._chr_last_blocks[x.chrom] = x.stop return True else: return False
python
def include_block(self, x): """Check for inclusion of block based on distance from previous. """ last_pos = self._chr_last_blocks.get(x.chrom, 0) # Region excludes an entire chromosome, typically decoy/haplotypes if last_pos <= self._end_buffer and x.stop >= self._ref_sizes.get(x.chrom, 0) - self._end_buffer: return True # Do not split on smaller decoy and haplotype chromosomes elif self._ref_sizes.get(x.chrom, 0) <= self._target_size: return False elif (x.start - last_pos) > self._target_size: self._chr_last_blocks[x.chrom] = x.stop return True else: return False
[ "def", "include_block", "(", "self", ",", "x", ")", ":", "last_pos", "=", "self", ".", "_chr_last_blocks", ".", "get", "(", "x", ".", "chrom", ",", "0", ")", "# Region excludes an entire chromosome, typically decoy/haplotypes", "if", "last_pos", "<=", "self", "....
Check for inclusion of block based on distance from previous.
[ "Check", "for", "inclusion", "of", "block", "based", "on", "distance", "from", "previous", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L155-L169
223,078
bcbio/bcbio-nextgen
bcbio/bam/callable.py
NBlockRegionPicker.expand_block
def expand_block(self, feat): """Expand any blocks which are near the start or end of a contig. """ chrom_end = self._ref_sizes.get(feat.chrom) if chrom_end: if feat.start < self._end_buffer: feat.start = 0 if feat.stop >= chrom_end - self._end_buffer: feat.stop = chrom_end return feat
python
def expand_block(self, feat): """Expand any blocks which are near the start or end of a contig. """ chrom_end = self._ref_sizes.get(feat.chrom) if chrom_end: if feat.start < self._end_buffer: feat.start = 0 if feat.stop >= chrom_end - self._end_buffer: feat.stop = chrom_end return feat
[ "def", "expand_block", "(", "self", ",", "feat", ")", ":", "chrom_end", "=", "self", ".", "_ref_sizes", ".", "get", "(", "feat", ".", "chrom", ")", "if", "chrom_end", ":", "if", "feat", ".", "start", "<", "self", ".", "_end_buffer", ":", "feat", ".",...
Expand any blocks which are near the start or end of a contig.
[ "Expand", "any", "blocks", "which", "are", "near", "the", "start", "or", "end", "of", "a", "contig", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L171-L180
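To make the splitting heuristic behind NBlockRegionPicker.include_block and expand_block above concrete, here is a small self-contained re-implementation of the same distance and end-buffer arithmetic on plain values; the contig length, target size and buffer are made-up example numbers, and the real class reads them from the bcbio configuration.

# Standalone illustration of the N-block picking arithmetic (not the bcbio class).
ref_sizes = {"chr1": 1000000}   # hypothetical contig length
target_size = 100000            # minimum spacing between split points
end_buffer = 250                # blocks this close to contig ends get expanded

last_block_end = {}

def include_block(chrom, start, stop):
    last = last_block_end.get(chrom, 0)
    if last <= end_buffer and stop >= ref_sizes.get(chrom, 0) - end_buffer:
        return True                   # block covers essentially the whole contig
    if ref_sizes.get(chrom, 0) <= target_size:
        return False                  # contig too small to split (decoys/haplotypes)
    if (start - last) > target_size:
        last_block_end[chrom] = stop  # far enough from the previous split point
        return True
    return False

def expand_block(chrom, start, stop):
    chrom_end = ref_sizes.get(chrom)
    if chrom_end:
        if start < end_buffer:
            start = 0
        if stop >= chrom_end - end_buffer:
            stop = chrom_end
    return chrom, start, stop

print(include_block("chr1", 150000, 150500))   # True: far enough from the start
print(include_block("chr1", 200000, 200500))   # False: too close to the previous block
print(expand_block("chr1", 100, 500))          # start snapped to 0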
223,079
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
_keep_assembled_chrom
def _keep_assembled_chrom(bam_file, genome, config): """Remove contigs from the BAM file""" fai = "%s.fai" % genome chrom = [] with open(fai) as inh: for line in inh: c = line.split("\t")[0] if c.find("_") < 0: chrom.append(c) chroms = " ".join(chrom) out_file = utils.append_stem(bam_file, '_chrom') samtools = config_utils.get_program("samtools", config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out: cmd = "{samtools} view -b {bam_file} {chroms} > {tx_out}" do.run(cmd.format(**locals()), "Remove contigs from %s" % bam_file) bam.index(out_file, config) return out_file
python
def _keep_assembled_chrom(bam_file, genome, config): """Remove contigs from the BAM file""" fai = "%s.fai" % genome chrom = [] with open(fai) as inh: for line in inh: c = line.split("\t")[0] if c.find("_") < 0: chrom.append(c) chroms = " ".join(chrom) out_file = utils.append_stem(bam_file, '_chrom') samtools = config_utils.get_program("samtools", config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out: cmd = "{samtools} view -b {bam_file} {chroms} > {tx_out}" do.run(cmd.format(**locals()), "Remove contigs from %s" % bam_file) bam.index(out_file, config) return out_file
[ "def", "_keep_assembled_chrom", "(", "bam_file", ",", "genome", ",", "config", ")", ":", "fai", "=", "\"%s.fai\"", "%", "genome", "chrom", "=", "[", "]", "with", "open", "(", "fai", ")", "as", "inh", ":", "for", "line", "in", "inh", ":", "c", "=", ...
Remove contigs from the BAM file
[ "Remove", "contigs", "from", "the", "BAM", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L40-L57
223,080
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
_prepare_bam
def _prepare_bam(bam_file, bed_file, config): """Remove regions from bed files""" if not bam_file or not bed_file: return bam_file out_file = utils.append_stem(bam_file, '_filter') bedtools = config_utils.get_program("bedtools", config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out: cmd = "{bedtools} subtract -nonamecheck -A -a {bam_file} -b {bed_file} > {tx_out}" do.run(cmd.format(**locals()), "Remove blacklist regions from %s" % bam_file) return out_file
python
def _prepare_bam(bam_file, bed_file, config): """Remove regions from bed files""" if not bam_file or not bed_file: return bam_file out_file = utils.append_stem(bam_file, '_filter') bedtools = config_utils.get_program("bedtools", config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out: cmd = "{bedtools} subtract -nonamecheck -A -a {bam_file} -b {bed_file} > {tx_out}" do.run(cmd.format(**locals()), "Remove blacklist regions from %s" % bam_file) return out_file
[ "def", "_prepare_bam", "(", "bam_file", ",", "bed_file", ",", "config", ")", ":", "if", "not", "bam_file", "or", "not", "bed_file", ":", "return", "bam_file", "out_file", "=", "utils", ".", "append_stem", "(", "bam_file", ",", "'_filter'", ")", "bedtools", ...
Remove regions from bed files
[ "Remove", "regions", "from", "bed", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L60-L70
223,081
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
_bam_coverage
def _bam_coverage(name, bam_input, data): """Run bamCoverage from deeptools""" cmd = ("{bam_coverage} -b {bam_input} -o {bw_output} " "--binSize 20 --effectiveGenomeSize {size} " "--smoothLength 60 --extendReads 150 --centerReads -p {cores}") size = bam.fasta.total_sequence_length(dd.get_ref_file(data)) cores = dd.get_num_cores(data) try: bam_coverage = config_utils.get_program("bamCoverage", data) except config_utils.CmdNotFound: logger.info("No bamCoverage found, skipping bamCoverage.") return None resources = config_utils.get_resources("bamCoverage", data["config"]) if resources: options = resources.get("options") if options: cmd += " %s" % " ".join([str(x) for x in options]) bw_output = os.path.join(os.path.dirname(bam_input), "%s.bw" % name) if utils.file_exists(bw_output): return bw_output with file_transaction(bw_output) as out_tx: do.run(cmd.format(**locals()), "Run bamCoverage in %s" % name) return bw_output
python
def _bam_coverage(name, bam_input, data): """Run bamCoverage from deeptools""" cmd = ("{bam_coverage} -b {bam_input} -o {bw_output} " "--binSize 20 --effectiveGenomeSize {size} " "--smoothLength 60 --extendReads 150 --centerReads -p {cores}") size = bam.fasta.total_sequence_length(dd.get_ref_file(data)) cores = dd.get_num_cores(data) try: bam_coverage = config_utils.get_program("bamCoverage", data) except config_utils.CmdNotFound: logger.info("No bamCoverage found, skipping bamCoverage.") return None resources = config_utils.get_resources("bamCoverage", data["config"]) if resources: options = resources.get("options") if options: cmd += " %s" % " ".join([str(x) for x in options]) bw_output = os.path.join(os.path.dirname(bam_input), "%s.bw" % name) if utils.file_exists(bw_output): return bw_output with file_transaction(bw_output) as out_tx: do.run(cmd.format(**locals()), "Run bamCoverage in %s" % name) return bw_output
[ "def", "_bam_coverage", "(", "name", ",", "bam_input", ",", "data", ")", ":", "cmd", "=", "(", "\"{bam_coverage} -b {bam_input} -o {bw_output} \"", "\"--binSize 20 --effectiveGenomeSize {size} \"", "\"--smoothLength 60 --extendReads 150 --centerReads -p {cores}\"", ")", "size", "...
Run bamCoverage from deeptools
[ "Run", "bamCoverage", "from", "deeptools" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L72-L94
223,082
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_get_out_file
def _get_out_file(work_dir, paired): """Retrieve manta output variant file, depending on analysis. """ if paired: if paired.normal_bam: base_file = "somaticSV.vcf.gz" else: base_file = "tumorSV.vcf.gz" else: base_file = "diploidSV.vcf.gz" return os.path.join(work_dir, "results", "variants", base_file)
python
def _get_out_file(work_dir, paired): """Retrieve manta output variant file, depending on analysis. """ if paired: if paired.normal_bam: base_file = "somaticSV.vcf.gz" else: base_file = "tumorSV.vcf.gz" else: base_file = "diploidSV.vcf.gz" return os.path.join(work_dir, "results", "variants", base_file)
[ "def", "_get_out_file", "(", "work_dir", ",", "paired", ")", ":", "if", "paired", ":", "if", "paired", ".", "normal_bam", ":", "base_file", "=", "\"somaticSV.vcf.gz\"", "else", ":", "base_file", "=", "\"tumorSV.vcf.gz\"", "else", ":", "base_file", "=", "\"dipl...
Retrieve manta output variant file, depending on analysis.
[ "Retrieve", "manta", "output", "variant", "file", "depending", "on", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L71-L81
223,083
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_get_evidence_bam
def _get_evidence_bam(work_dir, data): """Retrieve evidence BAM for the sample if it exists """ evidence_bam = glob.glob(os.path.join(work_dir, "results", "evidence", "evidence_*.%s*.bam" % (dd.get_sample_name(data)))) if evidence_bam: return evidence_bam[0]
python
def _get_evidence_bam(work_dir, data): """Retrieve evidence BAM for the sample if it exists """ evidence_bam = glob.glob(os.path.join(work_dir, "results", "evidence", "evidence_*.%s*.bam" % (dd.get_sample_name(data)))) if evidence_bam: return evidence_bam[0]
[ "def", "_get_evidence_bam", "(", "work_dir", ",", "data", ")", ":", "evidence_bam", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"results\"", ",", "\"evidence\"", ",", "\"evidence_*.%s*.bam\"", "%", "(", "dd", ".",...
Retrieve evidence BAM for the sample if it exists
[ "Retrieve", "evidence", "BAM", "for", "the", "sample", "if", "it", "exists" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L83-L89
223,084
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_run_workflow
def _run_workflow(items, paired, workflow_file, work_dir): """Run manta analysis inside prepared workflow directory. """ utils.remove_safe(os.path.join(work_dir, "workspace")) data = paired.tumor_data if paired else items[0] cmd = [utils.get_program_python("configManta.py"), workflow_file, "-m", "local", "-j", dd.get_num_cores(data)] do.run(cmd, "Run manta SV analysis") utils.remove_safe(os.path.join(work_dir, "workspace"))
python
def _run_workflow(items, paired, workflow_file, work_dir): """Run manta analysis inside prepared workflow directory. """ utils.remove_safe(os.path.join(work_dir, "workspace")) data = paired.tumor_data if paired else items[0] cmd = [utils.get_program_python("configManta.py"), workflow_file, "-m", "local", "-j", dd.get_num_cores(data)] do.run(cmd, "Run manta SV analysis") utils.remove_safe(os.path.join(work_dir, "workspace"))
[ "def", "_run_workflow", "(", "items", ",", "paired", ",", "workflow_file", ",", "work_dir", ")", ":", "utils", ".", "remove_safe", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"workspace\"", ")", ")", "data", "=", "paired", ".", "tumor_da...
Run manta analysis inside prepared workflow directory.
[ "Run", "manta", "analysis", "inside", "prepared", "workflow", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L91-L98
223,085
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_prep_config
def _prep_config(items, paired, work_dir): """Run initial configuration, generating a run directory for Manta. """ assert utils.which("configManta.py"), "Could not find installed configManta.py" out_file = os.path.join(work_dir, "runWorkflow.py") if not utils.file_exists(out_file) or _out_of_date(out_file): config_script = os.path.realpath(utils.which("configManta.py")) cmd = [utils.get_program_python("configManta.py"), config_script] if paired: if paired.normal_bam: cmd += ["--normalBam=%s" % paired.normal_bam, "--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--bam=%s" % dd.get_align_bam(data) for data in items] data = paired.tumor_data if paired else items[0] cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir] if dd.get_coverage_interval(data) not in ["genome"]: cmd += ["--exome"] for region in _maybe_limit_chromosomes(data): cmd += ["--region", region] resources = config_utils.get_resources("manta", data["config"]) if resources.get("options"): cmd += [str(x) for x in resources["options"]] # If we are removing polyX, avoid calling on small indels which require # excessively long runtimes on noisy WGS runs if "polyx" in dd.get_exclude_regions(data): cmd += ["--config", _prep_streamlined_config(config_script, work_dir)] do.run(cmd, "Configure manta SV analysis") return out_file
python
def _prep_config(items, paired, work_dir): """Run initial configuration, generating a run directory for Manta. """ assert utils.which("configManta.py"), "Could not find installed configManta.py" out_file = os.path.join(work_dir, "runWorkflow.py") if not utils.file_exists(out_file) or _out_of_date(out_file): config_script = os.path.realpath(utils.which("configManta.py")) cmd = [utils.get_program_python("configManta.py"), config_script] if paired: if paired.normal_bam: cmd += ["--normalBam=%s" % paired.normal_bam, "--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--bam=%s" % dd.get_align_bam(data) for data in items] data = paired.tumor_data if paired else items[0] cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir] if dd.get_coverage_interval(data) not in ["genome"]: cmd += ["--exome"] for region in _maybe_limit_chromosomes(data): cmd += ["--region", region] resources = config_utils.get_resources("manta", data["config"]) if resources.get("options"): cmd += [str(x) for x in resources["options"]] # If we are removing polyX, avoid calling on small indels which require # excessively long runtimes on noisy WGS runs if "polyx" in dd.get_exclude_regions(data): cmd += ["--config", _prep_streamlined_config(config_script, work_dir)] do.run(cmd, "Configure manta SV analysis") return out_file
[ "def", "_prep_config", "(", "items", ",", "paired", ",", "work_dir", ")", ":", "assert", "utils", ".", "which", "(", "\"configManta.py\"", ")", ",", "\"Could not find installed configManta.py\"", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ...
Run initial configuration, generating a run directory for Manta.
[ "Run", "initial", "configuration", "generating", "a", "run", "directory", "for", "Manta", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L100-L129
223,086
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_prep_streamlined_config
def _prep_streamlined_config(config_script, work_dir): """Create manta INI file without steps that potentially increase runtimes. This removes calling of small indels. """ new_min_size = 100 in_file = config_script + ".ini" out_file = os.path.join(work_dir, os.path.basename(in_file)) with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("minCandidateVariantSize"): out_handle.write("minCandidateVariantSize = %s\n" % new_min_size) else: out_handle.write(line) return out_file
python
def _prep_streamlined_config(config_script, work_dir): """Create manta INI file without steps that potentially increase runtimes. This removes calling of small indels. """ new_min_size = 100 in_file = config_script + ".ini" out_file = os.path.join(work_dir, os.path.basename(in_file)) with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("minCandidateVariantSize"): out_handle.write("minCandidateVariantSize = %s\n" % new_min_size) else: out_handle.write(line) return out_file
[ "def", "_prep_streamlined_config", "(", "config_script", ",", "work_dir", ")", ":", "new_min_size", "=", "100", "in_file", "=", "config_script", "+", "\".ini\"", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "os", ".", "path", ".", ...
Create manta INI file without steps that potentially increase runtimes. This removes calling of small indels.
[ "Create", "manta", "INI", "file", "without", "steps", "that", "potentially", "increase", "runtimes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L131-L146
223,087
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_maybe_limit_chromosomes
def _maybe_limit_chromosomes(data): """Potentially limit chromosomes to avoid problematically named HLA contigs. HLAs have ':' characters in them which confuse downstream processing. If we have no problematic chromosomes we don't limit anything. """ std_chroms = [] prob_chroms = [] noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data) for contig in ref.file_contigs(dd.get_ref_file(data)): if contig.name.find(":") > 0 or (noalt_calling and not chromhacks.is_nonalt(contig.name)): prob_chroms.append(contig.name) else: std_chroms.append(contig.name) if len(prob_chroms) > 0: return std_chroms else: return []
python
def _maybe_limit_chromosomes(data): """Potentially limit chromosomes to avoid problematically named HLA contigs. HLAs have ':' characters in them which confuse downstream processing. If we have no problematic chromosomes we don't limit anything. """ std_chroms = [] prob_chroms = [] noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data) for contig in ref.file_contigs(dd.get_ref_file(data)): if contig.name.find(":") > 0 or (noalt_calling and not chromhacks.is_nonalt(contig.name)): prob_chroms.append(contig.name) else: std_chroms.append(contig.name) if len(prob_chroms) > 0: return std_chroms else: return []
[ "def", "_maybe_limit_chromosomes", "(", "data", ")", ":", "std_chroms", "=", "[", "]", "prob_chroms", "=", "[", "]", "noalt_calling", "=", "\"noalt_calling\"", "in", "dd", ".", "get_tools_on", "(", "data", ")", "or", "\"altcontigs\"", "in", "dd", ".", "get_e...
Potentially limit chromosomes to avoid problematically named HLA contigs. HLAs have ':' characters in them which confuse downstream processing. If we have no problematic chromosomes we don't limit anything.
[ "Potentially", "limit", "chromosomes", "to", "avoid", "problematically", "named", "HLA", "contigs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L148-L165
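A short standalone illustration of the contig-name filtering rule in _maybe_limit_chromosomes above, using a made-up contig list; the real function reads contig names from the reference FASTA and additionally honours the noalt_calling / altcontigs settings.

# HLA contigs contain ':' characters, which confuse downstream processing.
contigs = ["chr1", "chr2", "chrM", "HLA-A*01:01:01:01", "HLA-B*07:02:01"]

prob_chroms = [c for c in contigs if c.find(":") > 0]
std_chroms = [c for c in contigs if c.find(":") <= 0]

# Only restrict calling when problematic names are actually present.
regions = std_chroms if prob_chroms else []
print(regions)   # ['chr1', 'chr2', 'chrM']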
223,088
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_out_of_date
def _out_of_date(rw_file): """Check if a run workflow file points to an older version of manta and needs a refresh. """ with open(rw_file) as in_handle: for line in in_handle: if line.startswith("sys.path.append"): file_version = line.split("/lib/python")[0].split("Cellar/manta/")[-1] if file_version != programs.get_version_manifest("manta"): return True return False
python
def _out_of_date(rw_file): """Check if a run workflow file points to an older version of manta and needs a refresh. """ with open(rw_file) as in_handle: for line in in_handle: if line.startswith("sys.path.append"): file_version = line.split("/lib/python")[0].split("Cellar/manta/")[-1] if file_version != programs.get_version_manifest("manta"): return True return False
[ "def", "_out_of_date", "(", "rw_file", ")", ":", "with", "open", "(", "rw_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"sys.path.append\"", ")", ":", "file_version", "=", "line", ".", ...
Check if a run workflow file points to an older version of manta and needs a refresh.
[ "Check", "if", "a", "run", "workflow", "file", "points", "to", "an", "older", "version", "of", "manta", "and", "needs", "a", "refresh", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L171-L180
223,089
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_freebayes_options_from_config
def _freebayes_options_from_config(items, config, out_file, region=None): """Prepare standard options from configuration input. Input BED target files are merged to avoid overlapping regions which cause FreeBayes to call multiple times. Checks for empty sets of target regions after filtering for high depth, in which case we should skip the FreeBayes run. """ opts = ["--genotype-qualities", "--strict-vcf"] cur_ploidy = ploidy.get_ploidy(items, region) base_ploidy = ploidy.get_ploidy(items) opts += ["--ploidy", str(cur_ploidy)] # Adjust min fraction when trying to call more sensitively in certain # regions. This is primarily meant for pooled mitochondrial calling. if (isinstance(region, (list, tuple)) and chromhacks.is_mitochondrial(region[0]) and cur_ploidy >= base_ploidy and "--min-alternate-fraction" not in opts and "-F" not in opts): opts += ["--min-alternate-fraction", "0.01"] variant_regions = bedutils.population_variant_regions(items, merged=True) # Produce gVCF output if any("gvcf" in dd.get_tools_on(d) for d in items): opts += ["--gvcf", "--gvcf-chunk", "50000"] no_target_regions = False target = shared.subset_variant_regions(variant_regions, region, out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): if os.path.getsize(target) == 0: no_target_regions = True else: opts += ["--targets", target] else: opts += ["--region", region_to_freebayes(target)] resources = config_utils.get_resources("freebayes", config) if resources.get("options"): opts += resources["options"] return opts, no_target_regions
python
def _freebayes_options_from_config(items, config, out_file, region=None): """Prepare standard options from configuration input. Input BED target files are merged to avoid overlapping regions which cause FreeBayes to call multiple times. Checks for empty sets of target regions after filtering for high depth, in which case we should skip the FreeBayes run. """ opts = ["--genotype-qualities", "--strict-vcf"] cur_ploidy = ploidy.get_ploidy(items, region) base_ploidy = ploidy.get_ploidy(items) opts += ["--ploidy", str(cur_ploidy)] # Adjust min fraction when trying to call more sensitively in certain # regions. This is primarily meant for pooled mitochondrial calling. if (isinstance(region, (list, tuple)) and chromhacks.is_mitochondrial(region[0]) and cur_ploidy >= base_ploidy and "--min-alternate-fraction" not in opts and "-F" not in opts): opts += ["--min-alternate-fraction", "0.01"] variant_regions = bedutils.population_variant_regions(items, merged=True) # Produce gVCF output if any("gvcf" in dd.get_tools_on(d) for d in items): opts += ["--gvcf", "--gvcf-chunk", "50000"] no_target_regions = False target = shared.subset_variant_regions(variant_regions, region, out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): if os.path.getsize(target) == 0: no_target_regions = True else: opts += ["--targets", target] else: opts += ["--region", region_to_freebayes(target)] resources = config_utils.get_resources("freebayes", config) if resources.get("options"): opts += resources["options"] return opts, no_target_regions
[ "def", "_freebayes_options_from_config", "(", "items", ",", "config", ",", "out_file", ",", "region", "=", "None", ")", ":", "opts", "=", "[", "\"--genotype-qualities\"", ",", "\"--strict-vcf\"", "]", "cur_ploidy", "=", "ploidy", ".", "get_ploidy", "(", "items",...
Prepare standard options from configuration input. Input BED target files are merged to avoid overlapping regions which cause FreeBayes to call multiple times. Checks for empty sets of target regions after filtering for high depth, in which case we should skip the FreeBayes run.
[ "Prepare", "standard", "options", "from", "configuration", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L29-L64
223,090
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_add_somatic_opts
def _add_somatic_opts(opts, paired): """Add somatic options to current set. See _run_freebayes_paired for references. """ if "--min-alternate-fraction" not in opts and "-F" not in opts: # add minimum reportable allele frequency # FreeBayes defaults to 20%, but use 10% by default for the # tumor case min_af = float(utils.get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 opts += " --min-alternate-fraction %s" % min_af # Recommended settings for cancer calling opts += (" --pooled-discrete --pooled-continuous " "--report-genotype-likelihood-max --allele-balance-priors-off") return opts
python
def _add_somatic_opts(opts, paired): """Add somatic options to current set. See _run_freebayes_paired for references. """ if "--min-alternate-fraction" not in opts and "-F" not in opts: # add minimum reportable allele frequency # FreeBayes defaults to 20%, but use 10% by default for the # tumor case min_af = float(utils.get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 opts += " --min-alternate-fraction %s" % min_af # Recommended settings for cancer calling opts += (" --pooled-discrete --pooled-continuous " "--report-genotype-likelihood-max --allele-balance-priors-off") return opts
[ "def", "_add_somatic_opts", "(", "opts", ",", "paired", ")", ":", "if", "\"--min-alternate-fraction\"", "not", "in", "opts", "and", "\"-F\"", "not", "in", "opts", ":", "# add minimum reportable allele frequency", "# FreeBayes defaults to 20%, but use 10% by default for the", ...
Add somatic options to current set. See _run_freebayes_paired for references.
[ "Add", "somatic", "options", "to", "current", "set", ".", "See", "_run_freebayes_paired", "for", "references", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L66-L79
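The allele-fraction handling in _add_somatic_opts above is easy to show in isolation: the configured min_allele_fraction is a percentage, while FreeBayes expects a fraction. A minimal sketch with a hypothetical starting option string and the 10 percent default mentioned in the code:

opts = "--genotype-qualities --strict-vcf"   # hypothetical starting options

min_allele_fraction_pct = 10                 # configuration value, in percent
if "--min-alternate-fraction" not in opts and "-F" not in opts:
    min_af = float(min_allele_fraction_pct) / 100.0   # 10 -> 0.1
    opts += " --min-alternate-fraction %s" % min_af
opts += (" --pooled-discrete --pooled-continuous"
         " --report-genotype-likelihood-max --allele-balance-priors-off")
print(opts)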
223,091
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_run_freebayes_caller
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None, somatic=None): """Detect SNPs and indels with FreeBayes. Performs post-filtering to remove very low quality variants which can cause issues feeding into GATK. Breaks variants into individual allelic primitives for analysis and evaluation. """ config = items[0]["config"] if out_file is None: out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: freebayes = config_utils.get_program("freebayes", config) input_bams = " ".join("-b %s" % x for x in align_bams) opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region) if no_target_regions: vcfutils.write_empty_vcf(tx_out_file, config, samples=[dd.get_sample_name(d) for d in items]) else: opts = " ".join(opts) # Recommended options from 1000 genomes low-complexity evaluation # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ opts += " --min-repeat-entropy 1" # Remove partial observations, which cause a preference for heterozygote calls # https://github.com/ekg/freebayes/issues/234#issuecomment-205331765 opts += " --no-partial-observations" if somatic: opts = _add_somatic_opts(opts, somatic) compress_cmd = "| bgzip -c" if out_file.endswith("gz") else "" # For multi-sample outputs, ensure consistent order samples = ("-s" + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else "" fix_ambig = vcfutils.fix_ambiguous_cl() py_cl = config_utils.get_program("py", config) cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} " """| bcftools filter -i 'ALT="<*>" || QUAL > 5' """ "| {fix_ambig} | bcftools view {samples} -a - | " "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | " "vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | " "vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null " "{compress_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {}) return out_file
python
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None, somatic=None): """Detect SNPs and indels with FreeBayes. Performs post-filtering to remove very low quality variants which can cause issues feeding into GATK. Breaks variants into individual allelic primitives for analysis and evaluation. """ config = items[0]["config"] if out_file is None: out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: freebayes = config_utils.get_program("freebayes", config) input_bams = " ".join("-b %s" % x for x in align_bams) opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region) if no_target_regions: vcfutils.write_empty_vcf(tx_out_file, config, samples=[dd.get_sample_name(d) for d in items]) else: opts = " ".join(opts) # Recommended options from 1000 genomes low-complexity evaluation # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ opts += " --min-repeat-entropy 1" # Remove partial observations, which cause a preference for heterozygote calls # https://github.com/ekg/freebayes/issues/234#issuecomment-205331765 opts += " --no-partial-observations" if somatic: opts = _add_somatic_opts(opts, somatic) compress_cmd = "| bgzip -c" if out_file.endswith("gz") else "" # For multi-sample outputs, ensure consistent order samples = ("-s" + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else "" fix_ambig = vcfutils.fix_ambiguous_cl() py_cl = config_utils.get_program("py", config) cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} " """| bcftools filter -i 'ALT="<*>" || QUAL > 5' """ "| {fix_ambig} | bcftools view {samples} -a - | " "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | " "vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | " "vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null " "{compress_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {}) return out_file
[ "def", "_run_freebayes_caller", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ",", "somatic", "=", "None", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"...
Detect SNPs and indels with FreeBayes. Performs post-filtering to remove very low quality variants which can cause issues feeding into GATK. Breaks variants into individual allelic primitives for analysis and evaluation.
[ "Detect", "SNPs", "and", "indels", "with", "FreeBayes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L102-L144
223,092
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_check_lods
def _check_lods(parts, tumor_thresh, normal_thresh, indexes): """Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available. """ try: gl_index = parts[8].split(":").index("GL") except ValueError: return True try: tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",") if x != "."] if tumor_gls: tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls))) else: tumor_lod = -1.0 # No GL information, no tumor call (so fail it) except IndexError: tumor_lod = -1.0 try: normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",") if x != "."] if normal_gls: normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls))) else: normal_lod = normal_thresh # No GL information, no normal call (so pass it) except IndexError: normal_lod = normal_thresh return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
python
def _check_lods(parts, tumor_thresh, normal_thresh, indexes): """Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available. """ try: gl_index = parts[8].split(":").index("GL") except ValueError: return True try: tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",") if x != "."] if tumor_gls: tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls))) else: tumor_lod = -1.0 # No GL information, no tumor call (so fail it) except IndexError: tumor_lod = -1.0 try: normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",") if x != "."] if normal_gls: normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls))) else: normal_lod = normal_thresh # No GL information, no normal call (so pass it) except IndexError: normal_lod = normal_thresh return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
[ "def", "_check_lods", "(", "parts", ",", "tumor_thresh", ",", "normal_thresh", ",", "indexes", ")", ":", "try", ":", "gl_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"GL\"", ")", "except", "ValueError", ":"...
Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available.
[ "Ensure", "likelihoods", "for", "tumor", "and", "normal", "pass", "thresholds", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L197-L224
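A worked example of the log-odds arithmetic in _check_lods above, re-implemented standalone on fabricated FORMAT and sample strings (FreeBayes GL values are log10 genotype likelihoods, so differences between them act as LOD scores); the threshold values are placeholders.

fmt = "GT:DP:AO:RO:GL"
tumor = "0/1:60:18:42:-40.2,0.0,-90.1"
normal = "0/0:55:1:54:0.0,-12.6,-80.3"

gl_index = fmt.split(":").index("GL")

tumor_gls = [float(x) for x in tumor.split(":")[gl_index].split(",") if x != "."]
# Evidence for a non-reference genotype in the tumor.
tumor_lod = max(g - tumor_gls[0] for g in tumor_gls[1:])

normal_gls = [float(x) for x in normal.split(":")[gl_index].split(",") if x != "."]
# Evidence against a non-reference genotype in the normal.
normal_lod = min(normal_gls[0] - g for g in normal_gls[1:])

tumor_thresh, normal_thresh = 3.5, 3.5   # placeholder thresholds
print(tumor_lod, normal_lod)             # 40.2 12.6
print(normal_lod >= normal_thresh and tumor_lod >= tumor_thresh)   # True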
223,093
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_check_freqs
def _check_freqs(parts, indexes): """Ensure frequency of tumor to normal passes a reasonable threshold. Avoids calling low frequency tumors also present at low frequency in normals, which indicates a contamination or persistent error. """ thresh_ratio = 2.7 try: # FreeBayes ao_index = parts[8].split(":").index("AO") ro_index = parts[8].split(":").index("RO") except ValueError: ao_index, ro_index = None, None try: # VarDict af_index = parts[8].split(":").index("AF") except ValueError: af_index = None if af_index is None and ao_index is None: # okay to skip if a gVCF record if parts[4].find("<*>") == -1: raise NotImplementedError("Unexpected format annotations: %s" % parts[8]) def _calc_freq(item): try: if ao_index is not None and ro_index is not None: ao = sum([int(x) for x in item.split(":")[ao_index].split(",")]) ro = int(item.split(":")[ro_index]) freq = ao / float(ao + ro) elif af_index is not None: freq = float(item.split(":")[af_index]) else: freq = 0.0 except (IndexError, ValueError, ZeroDivisionError): freq = 0.0 return freq tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]]) return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
python
def _check_freqs(parts, indexes): """Ensure frequency of tumor to normal passes a reasonable threshold. Avoids calling low frequency tumors also present at low frequency in normals, which indicates a contamination or persistent error. """ thresh_ratio = 2.7 try: # FreeBayes ao_index = parts[8].split(":").index("AO") ro_index = parts[8].split(":").index("RO") except ValueError: ao_index, ro_index = None, None try: # VarDict af_index = parts[8].split(":").index("AF") except ValueError: af_index = None if af_index is None and ao_index is None: # okay to skip if a gVCF record if parts[4].find("<*>") == -1: raise NotImplementedError("Unexpected format annotations: %s" % parts[8]) def _calc_freq(item): try: if ao_index is not None and ro_index is not None: ao = sum([int(x) for x in item.split(":")[ao_index].split(",")]) ro = int(item.split(":")[ro_index]) freq = ao / float(ao + ro) elif af_index is not None: freq = float(item.split(":")[af_index]) else: freq = 0.0 except (IndexError, ValueError, ZeroDivisionError): freq = 0.0 return freq tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]]) return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
[ "def", "_check_freqs", "(", "parts", ",", "indexes", ")", ":", "thresh_ratio", "=", "2.7", "try", ":", "# FreeBayes", "ao_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"AO\"", ")", "ro_index", "=", "parts", ...
Ensure frequency of tumor to normal passes a reasonable threshold. Avoids calling low frequency tumors also present at low frequency in normals, which indicates a contamination or persistent error.
[ "Ensure", "frequency", "of", "tumor", "to", "normal", "passes", "a", "reasonable", "threshold", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L226-L260
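A companion worked example for the frequency check in _check_freqs above, following the FreeBayes AO/RO route with fabricated read counts.

fmt = "GT:DP:AO:RO"
tumor = "0/1:60:18:42"
normal = "0/0:55:1:54"

ao_i = fmt.split(":").index("AO")
ro_i = fmt.split(":").index("RO")

def allele_freq(sample):
    ao = sum(int(x) for x in sample.split(":")[ao_i].split(","))
    ro = int(sample.split(":")[ro_i])
    return ao / float(ao + ro)

thresh_ratio = 2.7
t_freq, n_freq = allele_freq(tumor), allele_freq(normal)
# Pass when the normal is essentially clean or far below the tumor frequency.
print(t_freq, n_freq)                                        # 0.3 and ~0.018
print(n_freq <= 0.001 or n_freq <= t_freq / thresh_ratio)    # True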
223,094
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_clean_freebayes_output
def _clean_freebayes_output(line): """Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on. """ if line.startswith("#"): line = line.replace("Type=Int,D", "Type=Integer,D") return line else: parts = line.split("\t") alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()] if len(alleles) == len(set(alleles)): return line return None
python
def _clean_freebayes_output(line): """Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on. """ if line.startswith("#"): line = line.replace("Type=Int,D", "Type=Integer,D") return line else: parts = line.split("\t") alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()] if len(alleles) == len(set(alleles)): return line return None
[ "def", "_clean_freebayes_output", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "line", "=", "line", ".", "replace", "(", "\"Type=Int,D\"", ",", "\"Type=Integer,D\"", ")", "return", "line", "else", ":", "parts", "=", "line...
Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on.
[ "Clean", "FreeBayes", "output", "to", "make", "post", "-", "processing", "with", "GATK", "happy", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L332-L353
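To make the duplicate-allele rule in _clean_freebayes_output above concrete, here is the same test applied to two fabricated VCF body lines (fields are tab-separated).

good = "2\t22816178\t.\tG\tA\t153.2"
bad = "2\t22816178\t.\tG\tG\t0.0339196"   # REF equals ALT, so the line is dropped

def keep_line(line):
    parts = line.split("\t")
    alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
    return len(alleles) == len(set(alleles))

print(keep_line(good))   # True
print(keep_line(bad))    # False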
223,095
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
clean_vcf_output
def clean_vcf_output(orig_file, clean_fn, config, name="clean"): """Provide framework to clean a file in-place, with the specified clean function. """ base, ext = utils.splitext_plus(orig_file) out_file = "{0}-{1}{2}".format(base, name, ext) if not utils.file_exists(out_file): with open(orig_file) as in_handle: with file_transaction(config, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for line in in_handle: update_line = clean_fn(line) if update_line: out_handle.write(update_line) move_vcf(orig_file, "{0}.orig".format(orig_file)) move_vcf(out_file, orig_file) with open(out_file, "w") as out_handle: out_handle.write("Moved to {0}".format(orig_file))
python
def clean_vcf_output(orig_file, clean_fn, config, name="clean"): """Provide framework to clean a file in-place, with the specified clean function. """ base, ext = utils.splitext_plus(orig_file) out_file = "{0}-{1}{2}".format(base, name, ext) if not utils.file_exists(out_file): with open(orig_file) as in_handle: with file_transaction(config, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for line in in_handle: update_line = clean_fn(line) if update_line: out_handle.write(update_line) move_vcf(orig_file, "{0}.orig".format(orig_file)) move_vcf(out_file, orig_file) with open(out_file, "w") as out_handle: out_handle.write("Moved to {0}".format(orig_file))
[ "def", "clean_vcf_output", "(", "orig_file", ",", "clean_fn", ",", "config", ",", "name", "=", "\"clean\"", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "orig_file", ")", "out_file", "=", "\"{0}-{1}{2}\"", ".", "format", "(", "bas...
Provide framework to clean a file in-place, with the specified clean function.
[ "Provide", "framework", "to", "clean", "a", "file", "in", "-", "place", "with", "the", "specified", "clean", "function", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L355-L372
223,096
bcbio/bcbio-nextgen
bcbio/variation/effects.py
get_type
def get_type(data): """Retrieve the type of effects calculation to do. """ if data["analysis"].lower().startswith("var") or dd.get_variantcaller(data): return tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
python
def get_type(data): """Retrieve the type of effects calculation to do. """ if data["analysis"].lower().startswith("var") or dd.get_variantcaller(data): return tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
[ "def", "get_type", "(", "data", ")", ":", "if", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", ".", "startswith", "(", "\"var\"", ")", "or", "dd", ".", "get_variantcaller", "(", "data", ")", ":", "return", "tz", ".", "get_in", "(", "(", ...
Retrieve the type of effects calculation to do.
[ "Retrieve", "the", "type", "of", "effects", "calculation", "to", "do", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L46-L50
223,097
bcbio/bcbio-nextgen
bcbio/variation/effects.py
prep_vep_cache
def prep_vep_cache(dbkey, ref_file, tooldir=None, config=None): """Ensure correct installation of VEP cache file. """ if config is None: config = {} resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey) if os.path.exists(resource_file): with open(resource_file) as in_handle: resources = yaml.safe_load(in_handle) ensembl_name = tz.get_in(["aliases", "ensembl"], resources) symlink_dir = _special_dbkey_maps(dbkey, ref_file) if ensembl_name and ensembl_name.find("_vep_") == -1: raise ValueError("%s has ensembl an incorrect value." "It should have _vep_ in the name." "Remove line or fix the name to avoid error.") if symlink_dir and ensembl_name: species, vepv = ensembl_name.split("_vep_") return symlink_dir, species elif ensembl_name: species, vepv = ensembl_name.split("_vep_") vep_dir = utils.safe_makedir(os.path.normpath(os.path.join( os.path.dirname(os.path.dirname(ref_file)), "vep"))) out_dir = os.path.join(vep_dir, species, vepv) if not os.path.exists(out_dir): tmp_dir = utils.safe_makedir(os.path.join(vep_dir, species, "txtmp")) eversion = vepv.split("_")[0] url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name) with utils.chdir(tmp_dir): subprocess.check_call(["wget", "--no-check-certificate", "-c", url]) vep_path = "%s/bin/" % tooldir if tooldir else "" perl_exports = utils.get_perl_exports() cmd = ["%svep_install" % vep_path, "-a", "c", "-s", ensembl_name, "-c", vep_dir, "-u", tmp_dir, "--NO_UPDATE", "--VERSION", eversion] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Prepare VEP directory for %s" % ensembl_name) cmd = ["%svep_convert_cache" % vep_path, "--species", species, "--version", vepv, "--dir", vep_dir, "--force_overwrite", "--remove"] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Convert VEP cache to tabix %s" % ensembl_name) for tmp_fname in os.listdir(tmp_dir): os.remove(os.path.join(tmp_dir, tmp_fname)) os.rmdir(tmp_dir) tmp_dir = os.path.join(vep_dir, "tmp") if os.path.exists(tmp_dir): shutil.rmtree(tmp_dir) return vep_dir, species return None, None
python
def prep_vep_cache(dbkey, ref_file, tooldir=None, config=None): """Ensure correct installation of VEP cache file. """ if config is None: config = {} resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey) if os.path.exists(resource_file): with open(resource_file) as in_handle: resources = yaml.safe_load(in_handle) ensembl_name = tz.get_in(["aliases", "ensembl"], resources) symlink_dir = _special_dbkey_maps(dbkey, ref_file) if ensembl_name and ensembl_name.find("_vep_") == -1: raise ValueError("%s has ensembl an incorrect value." "It should have _vep_ in the name." "Remove line or fix the name to avoid error.") if symlink_dir and ensembl_name: species, vepv = ensembl_name.split("_vep_") return symlink_dir, species elif ensembl_name: species, vepv = ensembl_name.split("_vep_") vep_dir = utils.safe_makedir(os.path.normpath(os.path.join( os.path.dirname(os.path.dirname(ref_file)), "vep"))) out_dir = os.path.join(vep_dir, species, vepv) if not os.path.exists(out_dir): tmp_dir = utils.safe_makedir(os.path.join(vep_dir, species, "txtmp")) eversion = vepv.split("_")[0] url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name) with utils.chdir(tmp_dir): subprocess.check_call(["wget", "--no-check-certificate", "-c", url]) vep_path = "%s/bin/" % tooldir if tooldir else "" perl_exports = utils.get_perl_exports() cmd = ["%svep_install" % vep_path, "-a", "c", "-s", ensembl_name, "-c", vep_dir, "-u", tmp_dir, "--NO_UPDATE", "--VERSION", eversion] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Prepare VEP directory for %s" % ensembl_name) cmd = ["%svep_convert_cache" % vep_path, "--species", species, "--version", vepv, "--dir", vep_dir, "--force_overwrite", "--remove"] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Convert VEP cache to tabix %s" % ensembl_name) for tmp_fname in os.listdir(tmp_dir): os.remove(os.path.join(tmp_dir, tmp_fname)) os.rmdir(tmp_dir) tmp_dir = os.path.join(vep_dir, "tmp") if os.path.exists(tmp_dir): shutil.rmtree(tmp_dir) return vep_dir, species return None, None
[ "def", "prep_vep_cache", "(", "dbkey", ",", "ref_file", ",", "tooldir", "=", "None", ",", "config", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "=", "{", "}", "resource_file", "=", "os", ".", "path", ".", "join", "(", "os", ...
Ensure correct installation of VEP cache file.
[ "Ensure", "correct", "installation", "of", "VEP", "cache", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L74-L117
223,098
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_get_G2P
def _get_G2P(data): """ A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement. """ G2P_file = os.path.realpath(tz.get_in(("genome_resources", "variation", "genotype2phenotype"), data)) args = ["--plugin", "G2P,file:%s" % (G2P_file)] if G2P_file: return args else: return []
python
def _get_G2P(data): """ A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement. """ G2P_file = os.path.realpath(tz.get_in(("genome_resources", "variation", "genotype2phenotype"), data)) args = ["--plugin", "G2P,file:%s" % (G2P_file)] if G2P_file: return args else: return []
[ "def", "_get_G2P", "(", "data", ")", ":", "G2P_file", "=", "os", ".", "path", ".", "realpath", "(", "tz", ".", "get_in", "(", "(", "\"genome_resources\"", ",", "\"variation\"", ",", "\"genotype2phenotype\"", ")", ",", "data", ")", ")", "args", "=", "[", ...
A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement.
[ "A", "VEP", "plugin", "that", "uses", "G2P", "allelic", "requirements", "to", "assess", "variants", "in", "genes", "for", "potential", "phenotype", "involvement", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L234-L244
223,099
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_snpeff_args_from_config
def _snpeff_args_from_config(data): """Retrieve snpEff arguments supplied through input configuration. """ config = data["config"] args = ["-hgvs"] # General supplied arguments resources = config_utils.get_resources("snpeff", config) if resources.get("options"): args += [str(x) for x in resources.get("options", [])] # cancer specific calling arguments if vcfutils.get_paired_phenotype(data): args += ["-cancer"] effects_transcripts = dd.get_effects_transcripts(data) if effects_transcripts in set(["canonical_cancer"]): _, snpeff_base_dir = get_db(data) canon_list_file = os.path.join(snpeff_base_dir, "transcripts", "%s.txt" % effects_transcripts) if not utils.file_exists(canon_list_file): raise ValueError("Cannot find expected file for effects_transcripts: %s" % canon_list_file) args += ["-canonList", canon_list_file] elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data): args += ["-canon"] return args
python
def _snpeff_args_from_config(data): """Retrieve snpEff arguments supplied through input configuration. """ config = data["config"] args = ["-hgvs"] # General supplied arguments resources = config_utils.get_resources("snpeff", config) if resources.get("options"): args += [str(x) for x in resources.get("options", [])] # cancer specific calling arguments if vcfutils.get_paired_phenotype(data): args += ["-cancer"] effects_transcripts = dd.get_effects_transcripts(data) if effects_transcripts in set(["canonical_cancer"]): _, snpeff_base_dir = get_db(data) canon_list_file = os.path.join(snpeff_base_dir, "transcripts", "%s.txt" % effects_transcripts) if not utils.file_exists(canon_list_file): raise ValueError("Cannot find expected file for effects_transcripts: %s" % canon_list_file) args += ["-canonList", canon_list_file] elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data): args += ["-canon"] return args
[ "def", "_snpeff_args_from_config", "(", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "args", "=", "[", "\"-hgvs\"", "]", "# General supplied arguments", "resources", "=", "config_utils", ".", "get_resources", "(", "\"snpeff\"", ",", "config", ...
Retrieve snpEff arguments supplied through input configuration.
[ "Retrieve", "snpEff", "arguments", "supplied", "through", "input", "configuration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L266-L288