Dataset schema (column name, value type, and observed size range):
- repository_name: string, length 7 to 55
- func_path_in_repository: string, length 4 to 223
- func_name: string, length 1 to 134
- whole_func_string: string, length 75 to 104k
- language: 1 distinct value
- func_code_string: string, length 75 to 104k
- func_code_tokens: list, length 19 to 28.4k
- func_documentation_string: string, length 1 to 46.9k
- func_documentation_tokens: list, length 1 to 1.97k
- split_name: 1 distinct value
- func_code_url: string, length 87 to 315
alvarogzp/telegram-bot-framework
bot/action/util/format.py
UserFormatter.full_data
def full_data(self): """ Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added. """ data = [ self.full_name, self._username(), self._id(), self._language_code(), self._is_bot() ] return " ".join(filter(None, data))
python
def full_data(self): """ Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added. """ data = [ self.full_name, self._username(), self._id(), self._language_code(), self._is_bot() ] return " ".join(filter(None, data))
[ "def", "full_data", "(", "self", ")", ":", "data", "=", "[", "self", ".", "full_name", ",", "self", ".", "_username", "(", ")", ",", "self", ".", "_id", "(", ")", ",", "self", ".", "_language_code", "(", ")", ",", "self", ".", "_is_bot", "(", ")", "]", "return", "\" \"", ".", "join", "(", "filter", "(", "None", ",", "data", ")", ")" ]
Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added.
[ "Returns", "all", "the", "info", "available", "for", "the", "user", "in", "the", "following", "format", ":", "name", "[", "username", "]", "<id", ">", "(", "locale", ")", "bot_or_user", "If", "any", "data", "is", "not", "available", "it", "is", "not", "added", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L79-L92
alvarogzp/telegram-bot-framework
bot/action/util/format.py
ChatFormatter.full_data
def full_data(self): """ Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added. """ data = [ self.chat.title, self._username(), self._type(), self._id() ] return " ".join(filter(None, data))
python
def full_data(self): """ Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added. """ data = [ self.chat.title, self._username(), self._type(), self._id() ] return " ".join(filter(None, data))
[ "def", "full_data", "(", "self", ")", ":", "data", "=", "[", "self", ".", "chat", ".", "title", ",", "self", ".", "_username", "(", ")", ",", "self", ".", "_type", "(", ")", ",", "self", ".", "_id", "(", ")", "]", "return", "\" \"", ".", "join", "(", "filter", "(", "None", ",", "data", ")", ")" ]
Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added.
[ "Returns", "all", "the", "info", "available", "for", "the", "chat", "in", "the", "following", "format", ":", "title", "[", "username", "]", "(", "type", ")", "<id", ">", "If", "any", "data", "is", "not", "available", "it", "is", "not", "added", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/format.py#L124-L136
hammerlab/cohorts
cohorts/functions.py
use_defaults
def use_defaults(func): """ Decorator for functions that should automatically fall back to the Cohort-default filter_fn and normalized_per_mb if not specified. """ @wraps(func) def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs): filter_fn = first_not_none_param([filter_fn, cohort.filter_fn], no_filter) normalized_per_mb = first_not_none_param([normalized_per_mb, cohort.normalized_per_mb], False) return func(row=row, cohort=cohort, filter_fn=filter_fn, normalized_per_mb=normalized_per_mb, **kwargs) return wrapper
python
def use_defaults(func): """ Decorator for functions that should automatically fall back to the Cohort-default filter_fn and normalized_per_mb if not specified. """ @wraps(func) def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs): filter_fn = first_not_none_param([filter_fn, cohort.filter_fn], no_filter) normalized_per_mb = first_not_none_param([normalized_per_mb, cohort.normalized_per_mb], False) return func(row=row, cohort=cohort, filter_fn=filter_fn, normalized_per_mb=normalized_per_mb, **kwargs) return wrapper
[ "def", "use_defaults", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "row", ",", "cohort", ",", "filter_fn", "=", "None", ",", "normalized_per_mb", "=", "None", ",", "*", "*", "kwargs", ")", ":", "filter_fn", "=", "first_not_none_param", "(", "[", "filter_fn", ",", "cohort", ".", "filter_fn", "]", ",", "no_filter", ")", "normalized_per_mb", "=", "first_not_none_param", "(", "[", "normalized_per_mb", ",", "cohort", ".", "normalized_per_mb", "]", ",", "False", ")", "return", "func", "(", "row", "=", "row", ",", "cohort", "=", "cohort", ",", "filter_fn", "=", "filter_fn", ",", "normalized_per_mb", "=", "normalized_per_mb", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorator for functions that should automatically fall back to the Cohort-default filter_fn and normalized_per_mb if not specified.
[ "Decorator", "for", "functions", "that", "should", "automatically", "fall", "back", "to", "the", "Cohort", "-", "default", "filter_fn", "and", "normalized_per_mb", "if", "not", "specified", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/functions.py#L28-L42
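A minimal sketch of how a per-row metric might opt into these defaults via the `use_defaults` decorator from the row above. The `variant_count` name and the function body are illustrative assumptions, not part of the cohorts source; only the decorator and the `(row, cohort, filter_fn, normalized_per_mb, **kwargs)` signature mirror the documented behavior.

```python
from cohorts.functions import use_defaults  # assumed import path (cohorts/functions.py above)

@use_defaults
def variant_count(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs):
    # By the time this body runs, filter_fn and normalized_per_mb have been
    # resolved to the Cohort-level defaults if the caller passed None.
    variants = cohort.load_variants(
        patients=[cohort.patient_from_id(row["patient_id"])],
        filter_fn=filter_fn)
    collection = variants.get(row["patient_id"])
    return len(collection) if collection is not None else 0
```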
hammerlab/cohorts
cohorts/functions.py
count_function
def count_function(func): """ Decorator for functions that return a collection (technically a dict of collections) that should be counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if not specified. """ # Fall back to Cohort-level defaults. @use_defaults @wraps(func) def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs): per_patient_data = func(row=row, cohort=cohort, filter_fn=filter_fn, normalized_per_mb=normalized_per_mb, **kwargs) patient_id = row["patient_id"] if patient_id in per_patient_data: count = len(per_patient_data[patient_id]) if normalized_per_mb: count /= float(get_patient_to_mb(cohort)[patient_id]) return count return np.nan return wrapper
python
def count_function(func): """ Decorator for functions that return a collection (technically a dict of collections) that should be counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if not specified. """ # Fall back to Cohort-level defaults. @use_defaults @wraps(func) def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs): per_patient_data = func(row=row, cohort=cohort, filter_fn=filter_fn, normalized_per_mb=normalized_per_mb, **kwargs) patient_id = row["patient_id"] if patient_id in per_patient_data: count = len(per_patient_data[patient_id]) if normalized_per_mb: count /= float(get_patient_to_mb(cohort)[patient_id]) return count return np.nan return wrapper
[ "def", "count_function", "(", "func", ")", ":", "# Fall back to Cohort-level defaults.", "@", "use_defaults", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "row", ",", "cohort", ",", "filter_fn", "=", "None", ",", "normalized_per_mb", "=", "None", ",", "*", "*", "kwargs", ")", ":", "per_patient_data", "=", "func", "(", "row", "=", "row", ",", "cohort", "=", "cohort", ",", "filter_fn", "=", "filter_fn", ",", "normalized_per_mb", "=", "normalized_per_mb", ",", "*", "*", "kwargs", ")", "patient_id", "=", "row", "[", "\"patient_id\"", "]", "if", "patient_id", "in", "per_patient_data", ":", "count", "=", "len", "(", "per_patient_data", "[", "patient_id", "]", ")", "if", "normalized_per_mb", ":", "count", "/=", "float", "(", "get_patient_to_mb", "(", "cohort", ")", "[", "patient_id", "]", ")", "return", "count", "return", "np", ".", "nan", "return", "wrapper" ]
Decorator for functions that return a collection (technically a dict of collections) that should be counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if not specified.
[ "Decorator", "for", "functions", "that", "return", "a", "collection", "(", "technically", "a", "dict", "of", "collections", ")", "that", "should", "be", "counted", "up", ".", "Also", "automatically", "falls", "back", "to", "the", "Cohort", "-", "default", "filter_fn", "and", "normalized_per_mb", "if", "not", "specified", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/functions.py#L44-L66
hammerlab/cohorts
cohorts/functions.py
count_variants_function_builder
def count_variants_function_builder(function_name, filterable_variant_function=None): """ Creates a function that counts variants that are filtered by the provided filterable_variant_function. The filterable_variant_function is a function that takes a filterable_variant and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well. """ @count_function def count(row, cohort, filter_fn, normalized_per_mb, **kwargs): def count_filter_fn(filterable_variant, **kwargs): assert filter_fn is not None, "filter_fn should never be None, but it is." return ((filterable_variant_function(filterable_variant) if filterable_variant_function is not None else True) and filter_fn(filterable_variant, **kwargs)) patient_id = row["patient_id"] return cohort.load_variants( patients=[cohort.patient_from_id(patient_id)], filter_fn=count_filter_fn, **kwargs) count.__name__ = function_name count.__doc__ = str("".join(inspect.getsourcelines(filterable_variant_function)[0])) if filterable_variant_function is not None else "" return count
python
def count_variants_function_builder(function_name, filterable_variant_function=None): """ Creates a function that counts variants that are filtered by the provided filterable_variant_function. The filterable_variant_function is a function that takes a filterable_variant and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well. """ @count_function def count(row, cohort, filter_fn, normalized_per_mb, **kwargs): def count_filter_fn(filterable_variant, **kwargs): assert filter_fn is not None, "filter_fn should never be None, but it is." return ((filterable_variant_function(filterable_variant) if filterable_variant_function is not None else True) and filter_fn(filterable_variant, **kwargs)) patient_id = row["patient_id"] return cohort.load_variants( patients=[cohort.patient_from_id(patient_id)], filter_fn=count_filter_fn, **kwargs) count.__name__ = function_name count.__doc__ = str("".join(inspect.getsourcelines(filterable_variant_function)[0])) if filterable_variant_function is not None else "" return count
[ "def", "count_variants_function_builder", "(", "function_name", ",", "filterable_variant_function", "=", "None", ")", ":", "@", "count_function", "def", "count", "(", "row", ",", "cohort", ",", "filter_fn", ",", "normalized_per_mb", ",", "*", "*", "kwargs", ")", ":", "def", "count_filter_fn", "(", "filterable_variant", ",", "*", "*", "kwargs", ")", ":", "assert", "filter_fn", "is", "not", "None", ",", "\"filter_fn should never be None, but it is.\"", "return", "(", "(", "filterable_variant_function", "(", "filterable_variant", ")", "if", "filterable_variant_function", "is", "not", "None", "else", "True", ")", "and", "filter_fn", "(", "filterable_variant", ",", "*", "*", "kwargs", ")", ")", "patient_id", "=", "row", "[", "\"patient_id\"", "]", "return", "cohort", ".", "load_variants", "(", "patients", "=", "[", "cohort", ".", "patient_from_id", "(", "patient_id", ")", "]", ",", "filter_fn", "=", "count_filter_fn", ",", "*", "*", "kwargs", ")", "count", ".", "__name__", "=", "function_name", "count", ".", "__doc__", "=", "str", "(", "\"\"", ".", "join", "(", "inspect", ".", "getsourcelines", "(", "filterable_variant_function", ")", "[", "0", "]", ")", ")", "if", "filterable_variant_function", "is", "not", "None", "else", "\"\"", "return", "count" ]
Creates a function that counts variants that are filtered by the provided filterable_variant_function. The filterable_variant_function is a function that takes a filterable_variant and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
[ "Creates", "a", "function", "that", "counts", "variants", "that", "are", "filtered", "by", "the", "provided", "filterable_variant_function", ".", "The", "filterable_variant_function", "is", "a", "function", "that", "takes", "a", "filterable_variant", "and", "returns", "True", "or", "False", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/functions.py#L73-L93
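A hedged usage sketch for the builder above. The `contig` attribute reached through `filterable_variant.variant` is an assumption about the FilterableVariant interface, and `chr1_variant_count` is a made-up name; the builder call itself follows the documented signature.

```python
from cohorts.functions import count_variants_function_builder  # assumed import path

# Build a per-row counting function for variants on chromosome 1.
chr1_variant_count = count_variants_function_builder(
    "chr1_variant_count",
    filterable_variant_function=lambda fv: fv.variant.contig == "1")

# The result can then be handed to Cohort.as_dataframe(on=chr1_variant_count);
# the Cohort's default filter_fn is still applied on top of the lambda.
```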
hammerlab/cohorts
cohorts/functions.py
count_effects_function_builder
def count_effects_function_builder(function_name, only_nonsynonymous, filterable_effect_function=None): """ Create a function that counts effects that are filtered by the provided filterable_effect_function. The filterable_effect_function is a function that takes a filterable_effect and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well. """ @count_function def count(row, cohort, filter_fn, normalized_per_mb, **kwargs): def count_filter_fn(filterable_effect, **kwargs): assert filter_fn is not None, "filter_fn should never be None, but it is." return ((filterable_effect_function(filterable_effect) if filterable_effect_function is not None else True) and filter_fn(filterable_effect, **kwargs)) # This only loads one effect per variant. patient_id = row["patient_id"] return cohort.load_effects( only_nonsynonymous=only_nonsynonymous, patients=[cohort.patient_from_id(patient_id)], filter_fn=count_filter_fn, **kwargs) count.__name__ = function_name count.__doc__ = (("only_nonsynonymous=%s\n" % only_nonsynonymous) + str("".join(inspect.getsourcelines(filterable_effect_function)[0])) if filterable_effect_function is not None else "") # Keep track of these to be able to query the returned function for these attributes count.only_nonsynonymous = only_nonsynonymous count.filterable_effect_function = filterable_effect_function return count
python
def count_effects_function_builder(function_name, only_nonsynonymous, filterable_effect_function=None): """ Create a function that counts effects that are filtered by the provided filterable_effect_function. The filterable_effect_function is a function that takes a filterable_effect and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well. """ @count_function def count(row, cohort, filter_fn, normalized_per_mb, **kwargs): def count_filter_fn(filterable_effect, **kwargs): assert filter_fn is not None, "filter_fn should never be None, but it is." return ((filterable_effect_function(filterable_effect) if filterable_effect_function is not None else True) and filter_fn(filterable_effect, **kwargs)) # This only loads one effect per variant. patient_id = row["patient_id"] return cohort.load_effects( only_nonsynonymous=only_nonsynonymous, patients=[cohort.patient_from_id(patient_id)], filter_fn=count_filter_fn, **kwargs) count.__name__ = function_name count.__doc__ = (("only_nonsynonymous=%s\n" % only_nonsynonymous) + str("".join(inspect.getsourcelines(filterable_effect_function)[0])) if filterable_effect_function is not None else "") # Keep track of these to be able to query the returned function for these attributes count.only_nonsynonymous = only_nonsynonymous count.filterable_effect_function = filterable_effect_function return count
[ "def", "count_effects_function_builder", "(", "function_name", ",", "only_nonsynonymous", ",", "filterable_effect_function", "=", "None", ")", ":", "@", "count_function", "def", "count", "(", "row", ",", "cohort", ",", "filter_fn", ",", "normalized_per_mb", ",", "*", "*", "kwargs", ")", ":", "def", "count_filter_fn", "(", "filterable_effect", ",", "*", "*", "kwargs", ")", ":", "assert", "filter_fn", "is", "not", "None", ",", "\"filter_fn should never be None, but it is.\"", "return", "(", "(", "filterable_effect_function", "(", "filterable_effect", ")", "if", "filterable_effect_function", "is", "not", "None", "else", "True", ")", "and", "filter_fn", "(", "filterable_effect", ",", "*", "*", "kwargs", ")", ")", "# This only loads one effect per variant.", "patient_id", "=", "row", "[", "\"patient_id\"", "]", "return", "cohort", ".", "load_effects", "(", "only_nonsynonymous", "=", "only_nonsynonymous", ",", "patients", "=", "[", "cohort", ".", "patient_from_id", "(", "patient_id", ")", "]", ",", "filter_fn", "=", "count_filter_fn", ",", "*", "*", "kwargs", ")", "count", ".", "__name__", "=", "function_name", "count", ".", "__doc__", "=", "(", "(", "\"only_nonsynonymous=%s\\n\"", "%", "only_nonsynonymous", ")", "+", "str", "(", "\"\"", ".", "join", "(", "inspect", ".", "getsourcelines", "(", "filterable_effect_function", ")", "[", "0", "]", ")", ")", "if", "filterable_effect_function", "is", "not", "None", "else", "\"\"", ")", "# Keep track of these to be able to query the returned function for these attributes", "count", ".", "only_nonsynonymous", "=", "only_nonsynonymous", "count", ".", "filterable_effect_function", "=", "filterable_effect_function", "return", "count" ]
Create a function that counts effects that are filtered by the provided filterable_effect_function. The filterable_effect_function is a function that takes a filterable_effect and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
[ "Create", "a", "function", "that", "counts", "effects", "that", "are", "filtered", "by", "the", "provided", "filterable_effect_function", ".", "The", "filterable_effect_function", "is", "a", "function", "that", "takes", "a", "filterable_effect", "and", "returns", "True", "or", "False", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/functions.py#L95-L121
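Likewise, a sketch of building a nonsynonymous effect counter. The `gene_name` attribute used inside the lambda is an assumption about what a filterable_effect exposes, and `tp53_nonsyn_count` is a made-up name; the keyword arguments mirror the builder's documented signature.

```python
from cohorts.functions import count_effects_function_builder  # assumed import path

tp53_nonsyn_count = count_effects_function_builder(
    "tp53_nonsyn_count",
    only_nonsynonymous=True,
    filterable_effect_function=lambda fe: fe.gene_name == "TP53")

# The builder stores its configuration on the returned function,
# so downstream code can introspect it:
assert tp53_nonsyn_count.only_nonsynonymous is True
```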
hammerlab/cohorts
cohorts/functions.py
median_vaf_purity
def median_vaf_purity(row, cohort, **kwargs): """ Estimate purity based on 2 * median VAF. Even if the Cohort has a default filter_fn, ignore it: we want to use all variants for this estimate. """ patient_id = row["patient_id"] patient = cohort.patient_from_id(patient_id) variants = cohort.load_variants(patients=[patient], filter_fn=no_filter) if patient_id in variants.keys(): variants = variants[patient_id] else: return np.nan def grab_vaf(variant): filterable_variant = FilterableVariant(variant, variants, patient) return variant_stats_from_variant(variant, filterable_variant.variant_metadata).tumor_stats.variant_allele_frequency vafs = [grab_vaf(variant) for variant in variants] return 2 * pd.Series(vafs).median()
python
def median_vaf_purity(row, cohort, **kwargs): """ Estimate purity based on 2 * median VAF. Even if the Cohort has a default filter_fn, ignore it: we want to use all variants for this estimate. """ patient_id = row["patient_id"] patient = cohort.patient_from_id(patient_id) variants = cohort.load_variants(patients=[patient], filter_fn=no_filter) if patient_id in variants.keys(): variants = variants[patient_id] else: return np.nan def grab_vaf(variant): filterable_variant = FilterableVariant(variant, variants, patient) return variant_stats_from_variant(variant, filterable_variant.variant_metadata).tumor_stats.variant_allele_frequency vafs = [grab_vaf(variant) for variant in variants] return 2 * pd.Series(vafs).median()
[ "def", "median_vaf_purity", "(", "row", ",", "cohort", ",", "*", "*", "kwargs", ")", ":", "patient_id", "=", "row", "[", "\"patient_id\"", "]", "patient", "=", "cohort", ".", "patient_from_id", "(", "patient_id", ")", "variants", "=", "cohort", ".", "load_variants", "(", "patients", "=", "[", "patient", "]", ",", "filter_fn", "=", "no_filter", ")", "if", "patient_id", "in", "variants", ".", "keys", "(", ")", ":", "variants", "=", "variants", "[", "patient_id", "]", "else", ":", "return", "np", ".", "nan", "def", "grab_vaf", "(", "variant", ")", ":", "filterable_variant", "=", "FilterableVariant", "(", "variant", ",", "variants", ",", "patient", ")", "return", "variant_stats_from_variant", "(", "variant", ",", "filterable_variant", ".", "variant_metadata", ")", ".", "tumor_stats", ".", "variant_allele_frequency", "vafs", "=", "[", "grab_vaf", "(", "variant", ")", "for", "variant", "in", "variants", "]", "return", "2", "*", "pd", ".", "Series", "(", "vafs", ")", ".", "median", "(", ")" ]
Estimate purity based on 2 * median VAF. Even if the Cohort has a default filter_fn, ignore it: we want to use all variants for this estimate.
[ "Estimate", "purity", "based", "on", "2", "*", "median", "VAF", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/functions.py#L264-L282
hammerlab/cohorts
cohorts/model.py
bootstrap_auc
def bootstrap_auc(df, col, pred_col, n_bootstrap=1000): """ Calculate the bootstrapped AUC for a given col trying to predict a pred_col. Parameters ---------- df : pandas.DataFrame col : str column to retrieve the values from pred_col : str the column we're trying to predict n_bootstrap : int the number of bootstrap samples Returns ------- list : AUCs for each sampling """ scores = np.zeros(n_bootstrap) old_len = len(df) df.dropna(subset=[col], inplace=True) new_len = len(df) if new_len < old_len: logger.info("Dropping NaN values in %s to go from %d to %d rows" % (col, old_len, new_len)) preds = df[pred_col].astype(int) for i in range(n_bootstrap): sampled_counts, sampled_pred = resample(df[col], preds) if is_single_class(sampled_pred, col=pred_col): continue scores[i] = roc_auc_score(sampled_pred, sampled_counts) return scores
python
def bootstrap_auc(df, col, pred_col, n_bootstrap=1000): """ Calculate the bootstrapped AUC for a given col trying to predict a pred_col. Parameters ---------- df : pandas.DataFrame col : str column to retrieve the values from pred_col : str the column we're trying to predict n_bootstrap : int the number of bootstrap samples Returns ------- list : AUCs for each sampling """ scores = np.zeros(n_bootstrap) old_len = len(df) df.dropna(subset=[col], inplace=True) new_len = len(df) if new_len < old_len: logger.info("Dropping NaN values in %s to go from %d to %d rows" % (col, old_len, new_len)) preds = df[pred_col].astype(int) for i in range(n_bootstrap): sampled_counts, sampled_pred = resample(df[col], preds) if is_single_class(sampled_pred, col=pred_col): continue scores[i] = roc_auc_score(sampled_pred, sampled_counts) return scores
[ "def", "bootstrap_auc", "(", "df", ",", "col", ",", "pred_col", ",", "n_bootstrap", "=", "1000", ")", ":", "scores", "=", "np", ".", "zeros", "(", "n_bootstrap", ")", "old_len", "=", "len", "(", "df", ")", "df", ".", "dropna", "(", "subset", "=", "[", "col", "]", ",", "inplace", "=", "True", ")", "new_len", "=", "len", "(", "df", ")", "if", "new_len", "<", "old_len", ":", "logger", ".", "info", "(", "\"Dropping NaN values in %s to go from %d to %d rows\"", "%", "(", "col", ",", "old_len", ",", "new_len", ")", ")", "preds", "=", "df", "[", "pred_col", "]", ".", "astype", "(", "int", ")", "for", "i", "in", "range", "(", "n_bootstrap", ")", ":", "sampled_counts", ",", "sampled_pred", "=", "resample", "(", "df", "[", "col", "]", ",", "preds", ")", "if", "is_single_class", "(", "sampled_pred", ",", "col", "=", "pred_col", ")", ":", "continue", "scores", "[", "i", "]", "=", "roc_auc_score", "(", "sampled_pred", ",", "sampled_counts", ")", "return", "scores" ]
Calculate the bootstrapped AUC for a given col trying to predict a pred_col. Parameters ---------- df : pandas.DataFrame col : str column to retrieve the values from pred_col : str the column we're trying to predict n_bootstrap : int the number of bootstrap samples Returns ------- list : AUCs for each sampling
[ "Calculate", "the", "boostrapped", "AUC", "for", "a", "given", "col", "trying", "to", "predict", "a", "pred_col", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/model.py#L36-L66
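A small self-contained sketch of calling `bootstrap_auc` on toy data, assuming it is importable from cohorts.model (the file path in the row above). The column names are made up; note that the function drops NaN rows from the passed DataFrame in place.

```python
import numpy as np
import pandas as pd
from cohorts.model import bootstrap_auc  # assumed import path

rng = np.random.RandomState(0)
df = pd.DataFrame({"response": rng.randint(0, 2, size=100)})
# A score that loosely tracks the binary label, so the AUC sits above 0.5.
df["score"] = df["response"] + rng.normal(scale=0.5, size=100)

aucs = bootstrap_auc(df, col="score", pred_col="response", n_bootstrap=200)
print("median AUC:", np.median(aucs))
print("2.5/97.5 percentiles:", np.percentile(aucs, [2.5, 97.5]))
```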
alvarogzp/telegram-bot-framework
bot/multithreading/scheduler.py
SchedulerApi.set_callbacks
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False): """ :param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread. """ # We are setting self.worker_start_callback and self.worker_end_callback # to lambdas instead of saving them in private vars and moving the lambda logic # to a member function for, among other reasons, making callback updates atomic, # ie. once a callback has been posted, it will be executed as it was in that # moment, any call to set_callbacks will only affect callbacks posted since they # were updated, but not to any pending callback. # If callback is async, execute the start callback in the calling thread scheduler = self.immediate if are_async else self.background self.worker_start_callback = lambda worker: scheduler(Work( lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name )) # As the end callback is called *just* before the thread dies, # there is no problem running it on the thread self.worker_end_callback = lambda worker: self.immediate(Work( lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name ))
python
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False): """ :param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread. """ # We are setting self.worker_start_callback and self.worker_end_callback # to lambdas instead of saving them in private vars and moving the lambda logic # to a member function for, among other reasons, making callback updates atomic, # ie. once a callback has been posted, it will be executed as it was in that # moment, any call to set_callbacks will only affect callbacks posted since they # were updated, but not to any pending callback. # If callback is async, execute the start callback in the calling thread scheduler = self.immediate if are_async else self.background self.worker_start_callback = lambda worker: scheduler(Work( lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name )) # As the end callback is called *just* before the thread dies, # there is no problem running it on the thread self.worker_end_callback = lambda worker: self.immediate(Work( lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name ))
[ "def", "set_callbacks", "(", "self", ",", "worker_start_callback", ":", "callable", ",", "worker_end_callback", ":", "callable", ",", "are_async", ":", "bool", "=", "False", ")", ":", "# We are setting self.worker_start_callback and self.worker_end_callback", "# to lambdas instead of saving them in private vars and moving the lambda logic", "# to a member function for, among other reasons, making callback updates atomic,", "# ie. once a callback has been posted, it will be executed as it was in that", "# moment, any call to set_callbacks will only affect callbacks posted since they", "# were updated, but not to any pending callback.", "# If callback is async, execute the start callback in the calling thread", "scheduler", "=", "self", ".", "immediate", "if", "are_async", "else", "self", ".", "background", "self", ".", "worker_start_callback", "=", "lambda", "worker", ":", "scheduler", "(", "Work", "(", "lambda", ":", "worker_start_callback", "(", "worker", ")", ",", "\"worker_start_callback:\"", "+", "worker", ".", "name", ")", ")", "# As the end callback is called *just* before the thread dies,", "# there is no problem running it on the thread", "self", ".", "worker_end_callback", "=", "lambda", "worker", ":", "self", ".", "immediate", "(", "Work", "(", "lambda", ":", "worker_end_callback", "(", "worker", ")", ",", "\"worker_end_callback:\"", "+", "worker", ".", "name", ")", ")" ]
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
[ ":", "param", "are_async", ":", "True", "if", "the", "callbacks", "execute", "asynchronously", "posting", "any", "heavy", "work", "to", "another", "thread", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L59-L80
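A sketch of wiring lifecycle logging through `set_callbacks`. The `scheduler` variable is assumed to be an existing SchedulerApi instance; only the callback signatures (a single `worker` argument) and the `are_async` flag come from the row above.

```python
def on_worker_start(worker):
    # Lightweight work: with are_async=False the framework posts this
    # callback to its background worker rather than the calling thread.
    print("worker started:", worker.name)

def on_worker_end(worker):
    print("worker ended:", worker.name)

scheduler.set_callbacks(on_worker_start, on_worker_end, are_async=False)
```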
alvarogzp/telegram-bot-framework
bot/multithreading/scheduler.py
SchedulerApi._start_worker
def _start_worker(self, worker: Worker): """ Can be safely called multiple times on the same worker (for workers that support it) to start a new thread for it. """ # This function is called from main thread and from worker pools threads to start their children threads with self.running_workers_lock: self.running_workers.append(worker) thread = SchedulerThread(worker, self._worker_ended) thread.start() # This may or may not be posted to a background thread (see set_callbacks) self.worker_start_callback(worker)
python
def _start_worker(self, worker: Worker): """ Can be safely called multiple times on the same worker (for workers that support it) to start a new thread for it. """ # This function is called from main thread and from worker pools threads to start their children threads with self.running_workers_lock: self.running_workers.append(worker) thread = SchedulerThread(worker, self._worker_ended) thread.start() # This may or may not be posted to a background thread (see set_callbacks) self.worker_start_callback(worker)
[ "def", "_start_worker", "(", "self", ",", "worker", ":", "Worker", ")", ":", "# This function is called from main thread and from worker pools threads to start their children threads", "with", "self", ".", "running_workers_lock", ":", "self", ".", "running_workers", ".", "append", "(", "worker", ")", "thread", "=", "SchedulerThread", "(", "worker", ",", "self", ".", "_worker_ended", ")", "thread", ".", "start", "(", ")", "# This may or may not be posted to a background thread (see set_callbacks)", "self", ".", "worker_start_callback", "(", "worker", ")" ]
Can be safely called multiple times on the same worker (for workers that support it) to start a new thread for it.
[ "Can", "be", "safely", "called", "multiple", "times", "on", "the", "same", "worker", "(", "for", "workers", "that", "support", "it", ")", "to", "start", "a", "new", "thread", "for", "it", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L95-L106
alvarogzp/telegram-bot-framework
bot/multithreading/scheduler.py
SchedulerApi.new_worker
def new_worker(self, name: str): """Creates a new Worker and starts a new Thread with it. Returns the Worker.""" if not self.running: return self.immediate_worker worker = self._new_worker(name) self._start_worker(worker) return worker
python
def new_worker(self, name: str): """Creates a new Worker and starts a new Thread with it. Returns the Worker.""" if not self.running: return self.immediate_worker worker = self._new_worker(name) self._start_worker(worker) return worker
[ "def", "new_worker", "(", "self", ",", "name", ":", "str", ")", ":", "if", "not", "self", ".", "running", ":", "return", "self", ".", "immediate_worker", "worker", "=", "self", ".", "_new_worker", "(", "name", ")", "self", ".", "_start_worker", "(", "worker", ")", "return", "worker" ]
Creates a new Worker and starts a new Thread with it. Returns the Worker.
[ "Creates", "a", "new", "Worker", "and", "start", "a", "new", "Thread", "with", "it", ".", "Returns", "the", "Worker", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L148-L154
alvarogzp/telegram-bot-framework
bot/multithreading/scheduler.py
SchedulerApi.new_worker_pool
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1, max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE): """ Creates a new worker pool and starts it. Returns the Worker that schedules works to the pool. """ if not self.running: return self.immediate_worker worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle) self._start_worker_pool(worker) return worker
python
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1, max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE): """ Creates a new worker pool and starts it. Returns the Worker that schedules works to the pool. """ if not self.running: return self.immediate_worker worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle) self._start_worker_pool(worker) return worker
[ "def", "new_worker_pool", "(", "self", ",", "name", ":", "str", ",", "min_workers", ":", "int", "=", "0", ",", "max_workers", ":", "int", "=", "1", ",", "max_seconds_idle", ":", "int", "=", "DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE", ")", ":", "if", "not", "self", ".", "running", ":", "return", "self", ".", "immediate_worker", "worker", "=", "self", ".", "_new_worker_pool", "(", "name", ",", "min_workers", ",", "max_workers", ",", "max_seconds_idle", ")", "self", ".", "_start_worker_pool", "(", "worker", ")", "return", "worker" ]
Creates a new worker pool and starts it. Returns the Worker that schedules works to the pool.
[ "Creates", "a", "new", "worker", "pool", "and", "starts", "it", ".", "Returns", "the", "Worker", "that", "schedules", "works", "to", "the", "pool", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L156-L166
hammerlab/cohorts
cohorts/cohort.py
Cohort.as_dataframe
def as_dataframe(self, on=None, join_with=None, join_how=None, return_cols=False, rename_cols=False, keep_paren_contents=True, **kwargs): """ Return this Cohort as a DataFrame, and optionally include additional columns using `on`. on : str or function or list or dict, optional - A column name. - Or a function that creates a new column for comparison, e.g. count.snv_count. - Or a list of column-generating functions or column names. - Or a map of new column names to their column-generating functions or column names. If `on` is a function or functions, kwargs is passed to those functions. Otherwise kwargs is ignored. Other parameters ---------------- `return_cols`: (bool) If True, return column names generated via `on` along with the `DataFrame` as a `DataFrameHolder` tuple. `rename_cols`: (bool) If True, then return columns using "stripped" column names ("stripped" means lower-case names without punctuation other than `_`) See `utils.strip_column_names` for more details defaults to False `keep_paren_contents`: (bool) If True, then contents of column names within parens are kept. If False, contents of column names within-parens are dropped. Defaults to True ---------- Return : `DataFrame` (or `DataFrameHolder` if `return_cols` is True) """ df = self._as_dataframe_unmodified(join_with=join_with, join_how=join_how) if on is None: return DataFrameHolder.return_obj(None, df, return_cols) if type(on) == str: return DataFrameHolder.return_obj(on, df, return_cols) def apply_func(on, col, df): """ Sometimes we have functions that, by necessity, have more parameters than just `row`. We construct a function with just the `row` parameter so it can be sent to `DataFrame.apply`. We hackishly pass `cohort` (as `self`) along if the function accepts a `cohort` argument. """ on_argnames = on.__code__.co_varnames if "cohort" not in on_argnames: func = lambda row: on(row=row, **kwargs) else: func = lambda row: on(row=row, cohort=self, **kwargs) if self.show_progress: tqdm.pandas(desc=col) df[col] = df.progress_apply(func, axis=1) ## depends on tqdm on prev line else: df[col] = df.apply(func, axis=1) return DataFrameHolder(col, df) def func_name(func, num=0): return func.__name__ if not is_lambda(func) else "column_%d" % num def is_lambda(func): return func.__name__ == (lambda: None).__name__ if type(on) == FunctionType: return apply_func(on, func_name(on), df).return_self(return_cols) if len(kwargs) > 0: logger.warning("Note: kwargs used with multiple functions; passing them to all functions") if type(on) == dict: cols = [] for key, value in on.items(): if type(value) == str: df[key] = df[value] col = key elif type(value) == FunctionType: col, df = apply_func(on=value, col=key, df=df) else: raise ValueError("A value of `on`, %s, is not a str or function" % str(value)) cols.append(col) if type(on) == list: cols = [] for i, elem in enumerate(on): if type(elem) == str: col = elem elif type(elem) == FunctionType: col = func_name(elem, i) col, df = apply_func(on=elem, col=col, df=df) cols.append(col) if rename_cols: rename_dict = _strip_column_names(df.columns, keep_paren_contents=keep_paren_contents) df.rename(columns=rename_dict, inplace=True) cols = [rename_dict[col] for col in cols] return DataFrameHolder(cols, df).return_self(return_cols)
python
def as_dataframe(self, on=None, join_with=None, join_how=None, return_cols=False, rename_cols=False, keep_paren_contents=True, **kwargs): """ Return this Cohort as a DataFrame, and optionally include additional columns using `on`. on : str or function or list or dict, optional - A column name. - Or a function that creates a new column for comparison, e.g. count.snv_count. - Or a list of column-generating functions or column names. - Or a map of new column names to their column-generating functions or column names. If `on` is a function or functions, kwargs is passed to those functions. Otherwise kwargs is ignored. Other parameters ---------------- `return_cols`: (bool) If True, return column names generated via `on` along with the `DataFrame` as a `DataFrameHolder` tuple. `rename_cols`: (bool) If True, then return columns using "stripped" column names ("stripped" means lower-case names without punctuation other than `_`) See `utils.strip_column_names` for more details defaults to False `keep_paren_contents`: (bool) If True, then contents of column names within parens are kept. If False, contents of column names within-parens are dropped. Defaults to True ---------- Return : `DataFrame` (or `DataFrameHolder` if `return_cols` is True) """ df = self._as_dataframe_unmodified(join_with=join_with, join_how=join_how) if on is None: return DataFrameHolder.return_obj(None, df, return_cols) if type(on) == str: return DataFrameHolder.return_obj(on, df, return_cols) def apply_func(on, col, df): """ Sometimes we have functions that, by necessity, have more parameters than just `row`. We construct a function with just the `row` parameter so it can be sent to `DataFrame.apply`. We hackishly pass `cohort` (as `self`) along if the function accepts a `cohort` argument. """ on_argnames = on.__code__.co_varnames if "cohort" not in on_argnames: func = lambda row: on(row=row, **kwargs) else: func = lambda row: on(row=row, cohort=self, **kwargs) if self.show_progress: tqdm.pandas(desc=col) df[col] = df.progress_apply(func, axis=1) ## depends on tqdm on prev line else: df[col] = df.apply(func, axis=1) return DataFrameHolder(col, df) def func_name(func, num=0): return func.__name__ if not is_lambda(func) else "column_%d" % num def is_lambda(func): return func.__name__ == (lambda: None).__name__ if type(on) == FunctionType: return apply_func(on, func_name(on), df).return_self(return_cols) if len(kwargs) > 0: logger.warning("Note: kwargs used with multiple functions; passing them to all functions") if type(on) == dict: cols = [] for key, value in on.items(): if type(value) == str: df[key] = df[value] col = key elif type(value) == FunctionType: col, df = apply_func(on=value, col=key, df=df) else: raise ValueError("A value of `on`, %s, is not a str or function" % str(value)) cols.append(col) if type(on) == list: cols = [] for i, elem in enumerate(on): if type(elem) == str: col = elem elif type(elem) == FunctionType: col = func_name(elem, i) col, df = apply_func(on=elem, col=col, df=df) cols.append(col) if rename_cols: rename_dict = _strip_column_names(df.columns, keep_paren_contents=keep_paren_contents) df.rename(columns=rename_dict, inplace=True) cols = [rename_dict[col] for col in cols] return DataFrameHolder(cols, df).return_self(return_cols)
[ "def", "as_dataframe", "(", "self", ",", "on", "=", "None", ",", "join_with", "=", "None", ",", "join_how", "=", "None", ",", "return_cols", "=", "False", ",", "rename_cols", "=", "False", ",", "keep_paren_contents", "=", "True", ",", "*", "*", "kwargs", ")", ":", "df", "=", "self", ".", "_as_dataframe_unmodified", "(", "join_with", "=", "join_with", ",", "join_how", "=", "join_how", ")", "if", "on", "is", "None", ":", "return", "DataFrameHolder", ".", "return_obj", "(", "None", ",", "df", ",", "return_cols", ")", "if", "type", "(", "on", ")", "==", "str", ":", "return", "DataFrameHolder", ".", "return_obj", "(", "on", ",", "df", ",", "return_cols", ")", "def", "apply_func", "(", "on", ",", "col", ",", "df", ")", ":", "\"\"\"\n Sometimes we have functions that, by necessity, have more parameters\n than just `row`. We construct a function with just the `row` parameter\n so it can be sent to `DataFrame.apply`. We hackishly pass `cohort`\n (as `self`) along if the function accepts a `cohort` argument.\n \"\"\"", "on_argnames", "=", "on", ".", "__code__", ".", "co_varnames", "if", "\"cohort\"", "not", "in", "on_argnames", ":", "func", "=", "lambda", "row", ":", "on", "(", "row", "=", "row", ",", "*", "*", "kwargs", ")", "else", ":", "func", "=", "lambda", "row", ":", "on", "(", "row", "=", "row", ",", "cohort", "=", "self", ",", "*", "*", "kwargs", ")", "if", "self", ".", "show_progress", ":", "tqdm", ".", "pandas", "(", "desc", "=", "col", ")", "df", "[", "col", "]", "=", "df", ".", "progress_apply", "(", "func", ",", "axis", "=", "1", ")", "## depends on tqdm on prev line", "else", ":", "df", "[", "col", "]", "=", "df", ".", "apply", "(", "func", ",", "axis", "=", "1", ")", "return", "DataFrameHolder", "(", "col", ",", "df", ")", "def", "func_name", "(", "func", ",", "num", "=", "0", ")", ":", "return", "func", ".", "__name__", "if", "not", "is_lambda", "(", "func", ")", "else", "\"column_%d\"", "%", "num", "def", "is_lambda", "(", "func", ")", ":", "return", "func", ".", "__name__", "==", "(", "lambda", ":", "None", ")", ".", "__name__", "if", "type", "(", "on", ")", "==", "FunctionType", ":", "return", "apply_func", "(", "on", ",", "func_name", "(", "on", ")", ",", "df", ")", ".", "return_self", "(", "return_cols", ")", "if", "len", "(", "kwargs", ")", ">", "0", ":", "logger", ".", "warning", "(", "\"Note: kwargs used with multiple functions; passing them to all functions\"", ")", "if", "type", "(", "on", ")", "==", "dict", ":", "cols", "=", "[", "]", "for", "key", ",", "value", "in", "on", ".", "items", "(", ")", ":", "if", "type", "(", "value", ")", "==", "str", ":", "df", "[", "key", "]", "=", "df", "[", "value", "]", "col", "=", "key", "elif", "type", "(", "value", ")", "==", "FunctionType", ":", "col", ",", "df", "=", "apply_func", "(", "on", "=", "value", ",", "col", "=", "key", ",", "df", "=", "df", ")", "else", ":", "raise", "ValueError", "(", "\"A value of `on`, %s, is not a str or function\"", "%", "str", "(", "value", ")", ")", "cols", ".", "append", "(", "col", ")", "if", "type", "(", "on", ")", "==", "list", ":", "cols", "=", "[", "]", "for", "i", ",", "elem", "in", "enumerate", "(", "on", ")", ":", "if", "type", "(", "elem", ")", "==", "str", ":", "col", "=", "elem", "elif", "type", "(", "elem", ")", "==", "FunctionType", ":", "col", "=", "func_name", "(", "elem", ",", "i", ")", "col", ",", "df", "=", "apply_func", "(", "on", "=", "elem", ",", "col", "=", "col", ",", "df", "=", "df", ")", "cols", ".", "append", "(", "col", ")", "if", "rename_cols", ":", "rename_dict", "=", 
"_strip_column_names", "(", "df", ".", "columns", ",", "keep_paren_contents", "=", "keep_paren_contents", ")", "df", ".", "rename", "(", "columns", "=", "rename_dict", ",", "inplace", "=", "True", ")", "cols", "=", "[", "rename_dict", "[", "col", "]", "for", "col", "in", "cols", "]", "return", "DataFrameHolder", "(", "cols", ",", "df", ")", ".", "return_self", "(", "return_cols", ")" ]
Return this Cohort as a DataFrame, and optionally include additional columns using `on`. on : str or function or list or dict, optional - A column name. - Or a function that creates a new column for comparison, e.g. count.snv_count. - Or a list of column-generating functions or column names. - Or a map of new column names to their column-generating functions or column names. If `on` is a function or functions, kwargs is passed to those functions. Otherwise kwargs is ignored. Other parameters ---------------- `return_cols`: (bool) If True, return column names generated via `on` along with the `DataFrame` as a `DataFrameHolder` tuple. `rename_cols`: (bool) If True, then return columns using "stripped" column names ("stripped" means lower-case names without punctuation other than `_`) See `utils.strip_column_names` for more details defaults to False `keep_paren_contents`: (bool) If True, then contents of column names within parens are kept. If False, contents of column names within-parens are dropped. Defaults to True ---------- Return : `DataFrame` (or `DataFrameHolder` if `return_cols` is True)
[ "Return", "this", "Cohort", "as", "a", "DataFrame", "and", "optionally", "include", "additional", "columns", "using", "on", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L302-L400
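A few hedged call patterns for `as_dataframe`, assuming an existing Cohort instance named `cohort`. "benefit" and `snv_count` stand in for a real clinical column and a real row-wise function (such as those the builders above produce); only the keyword arguments mirror the docstring in the row above.

```python
# Plain clinical DataFrame, no extra columns.
df = cohort.as_dataframe()

# Pass a single existing column name straight through.
df = cohort.as_dataframe(on="benefit")

# Map new column names to row-wise functions; also return the generated
# column names and strip punctuation from all column names.
cols, df = cohort.as_dataframe(
    on={"snv_count": snv_count},
    return_cols=True,
    rename_cols=True)
```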
hammerlab/cohorts
cohorts/cohort.py
Cohort.load_dataframe
def load_dataframe(self, df_loader_name): """ Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. """ logger.debug("loading dataframe: {}".format(df_loader_name)) # Get the DataFrameLoader object corresponding to this name. df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name] if len(df_loaders) == 0: raise ValueError("No DataFrameLoader with name %s" % df_loader_name) if len(df_loaders) > 1: raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name) return df_loaders[0].load_dataframe()
python
def load_dataframe(self, df_loader_name): """ Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. """ logger.debug("loading dataframe: {}".format(df_loader_name)) # Get the DataFrameLoader object corresponding to this name. df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name] if len(df_loaders) == 0: raise ValueError("No DataFrameLoader with name %s" % df_loader_name) if len(df_loaders) > 1: raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name) return df_loaders[0].load_dataframe()
[ "def", "load_dataframe", "(", "self", ",", "df_loader_name", ")", ":", "logger", ".", "debug", "(", "\"loading dataframe: {}\"", ".", "format", "(", "df_loader_name", ")", ")", "# Get the DataFrameLoader object corresponding to this name.", "df_loaders", "=", "[", "df_loader", "for", "df_loader", "in", "self", ".", "df_loaders", "if", "df_loader", ".", "name", "==", "df_loader_name", "]", "if", "len", "(", "df_loaders", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No DataFrameLoader with name %s\"", "%", "df_loader_name", ")", "if", "len", "(", "df_loaders", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Multiple DataFrameLoaders with name %s\"", "%", "df_loader_name", ")", "return", "df_loaders", "[", "0", "]", ".", "load_dataframe", "(", ")" ]
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame.
[ "Instead", "of", "joining", "a", "DataFrameJoiner", "with", "the", "Cohort", "in", "as_dataframe", "sometimes", "we", "may", "want", "to", "just", "directly", "load", "a", "particular", "DataFrame", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L402-L416
hammerlab/cohorts
cohorts/cohort.py
Cohort._get_function_name
def _get_function_name(self, fn, default="None"): """ Return name of function, using default value if function not defined """ if fn is None: fn_name = default else: fn_name = fn.__name__ return fn_name
python
def _get_function_name(self, fn, default="None"): """ Return name of function, using default value if function not defined """ if fn is None: fn_name = default else: fn_name = fn.__name__ return fn_name
[ "def", "_get_function_name", "(", "self", ",", "fn", ",", "default", "=", "\"None\"", ")", ":", "if", "fn", "is", "None", ":", "fn_name", "=", "default", "else", ":", "fn_name", "=", "fn", ".", "__name__", "return", "fn_name" ]
Return name of function, using default value if function not defined
[ "Return", "name", "of", "function", "using", "default", "value", "if", "function", "not", "defined" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L502-L509
hammerlab/cohorts
cohorts/cohort.py
Cohort.load_variants
def load_variants(self, patients=None, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.VariantCollection Parameters ---------- patients : str, optional Filter to a subset of patients filter_fn : function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- merged_variants Dictionary of patient_id to VariantCollection """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading variants with filter_fn: {}".format(filter_fn_name)) patient_variants = {} for patient in self.iter_patients(patients): variants = self._load_single_patient_variants(patient, filter_fn, **kwargs) if variants is not None: patient_variants[patient.id] = variants return patient_variants
python
def load_variants(self, patients=None, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.VariantCollection Parameters ---------- patients : str, optional Filter to a subset of patients filter_fn : function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- merged_variants Dictionary of patient_id to VariantCollection """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading variants with filter_fn: {}".format(filter_fn_name)) patient_variants = {} for patient in self.iter_patients(patients): variants = self._load_single_patient_variants(patient, filter_fn, **kwargs) if variants is not None: patient_variants[patient.id] = variants return patient_variants
[ "def", "load_variants", "(", "self", ",", "patients", "=", "None", ",", "filter_fn", "=", "None", ",", "*", "*", "kwargs", ")", ":", "filter_fn", "=", "first_not_none_param", "(", "[", "filter_fn", ",", "self", ".", "filter_fn", "]", ",", "no_filter", ")", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ")", "logger", ".", "debug", "(", "\"loading variants with filter_fn: {}\"", ".", "format", "(", "filter_fn_name", ")", ")", "patient_variants", "=", "{", "}", "for", "patient", "in", "self", ".", "iter_patients", "(", "patients", ")", ":", "variants", "=", "self", ".", "_load_single_patient_variants", "(", "patient", ",", "filter_fn", ",", "*", "*", "kwargs", ")", "if", "variants", "is", "not", "None", ":", "patient_variants", "[", "patient", ".", "id", "]", "=", "variants", "return", "patient_variants" ]
Load a dictionary of patient_id to varcode.VariantCollection Parameters ---------- patients : str, optional Filter to a subset of patients filter_fn : function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- merged_variants Dictionary of patient_id to VariantCollection
[ "Load", "a", "dictionary", "of", "patient_id", "to", "varcode", ".", "VariantCollection" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L511-L536
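A minimal sketch of calling `load_variants` directly with an ad-hoc filter, assuming an existing Cohort named `cohort` and patient ids "P1"/"P2". The `variant.is_snv` attribute chain is an assumption about FilterableVariant; the rest follows the docstring above.

```python
def only_snvs(filterable_variant, **kwargs):
    # Keep single-nucleotide variants only (attribute names assumed).
    return filterable_variant.variant.is_snv

patient_to_variants = cohort.load_variants(
    patients=[cohort.patient_from_id("P1"), cohort.patient_from_id("P2")],
    filter_fn=only_snvs)

for patient_id, variant_collection in patient_to_variants.items():
    print(patient_id, len(variant_collection))
```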
hammerlab/cohorts
cohorts/cohort.py
Cohort._hash_filter_fn
def _hash_filter_fn(self, filter_fn, **kwargs): """ Construct string representing state of filter_fn Used to cache filtered variants or effects uniquely depending on filter fn values """ filter_fn_name = self._get_function_name(filter_fn, default="filter-none") logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs)))) # hash function source code fn_source = str(dill.source.getsource(filter_fn)) pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11) # hash kwarg values kw_dict = dict(**kwargs) kw_hash = list() if not kw_dict: kw_hash = ["default"] else: [kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())] # hash closure vars - for case where filter_fn is defined within closure of filter_fn closure = [] nonlocals = inspect.getclosurevars(filter_fn).nonlocals for (key, val) in nonlocals.items(): ## capture hash for any function within closure if inspect.isfunction(val): closure.append(self._hash_filter_fn(val)) closure.sort() # Sorted for file name consistency closure_str = "null" if len(closure) == 0 else "-".join(closure) # construct final string comprising hashed components hashed_fn = ".".join(["-".join([filter_fn_name, str(hashed_fn_source)]), ".".join(kw_hash), closure_str] ) return hashed_fn
python
def _hash_filter_fn(self, filter_fn, **kwargs): """ Construct string representing state of filter_fn Used to cache filtered variants or effects uniquely depending on filter fn values """ filter_fn_name = self._get_function_name(filter_fn, default="filter-none") logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs)))) # hash function source code fn_source = str(dill.source.getsource(filter_fn)) pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11) # hash kwarg values kw_dict = dict(**kwargs) kw_hash = list() if not kw_dict: kw_hash = ["default"] else: [kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())] # hash closure vars - for case where filter_fn is defined within closure of filter_fn closure = [] nonlocals = inspect.getclosurevars(filter_fn).nonlocals for (key, val) in nonlocals.items(): ## capture hash for any function within closure if inspect.isfunction(val): closure.append(self._hash_filter_fn(val)) closure.sort() # Sorted for file name consistency closure_str = "null" if len(closure) == 0 else "-".join(closure) # construct final string comprising hashed components hashed_fn = ".".join(["-".join([filter_fn_name, str(hashed_fn_source)]), ".".join(kw_hash), closure_str] ) return hashed_fn
[ "def", "_hash_filter_fn", "(", "self", ",", "filter_fn", ",", "*", "*", "kwargs", ")", ":", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ",", "default", "=", "\"filter-none\"", ")", "logger", ".", "debug", "(", "\"Computing hash for filter_fn: {} with kwargs {}\"", ".", "format", "(", "filter_fn_name", ",", "str", "(", "dict", "(", "*", "*", "kwargs", ")", ")", ")", ")", "# hash function source code", "fn_source", "=", "str", "(", "dill", ".", "source", ".", "getsource", "(", "filter_fn", ")", ")", "pickled_fn_source", "=", "pickle", ".", "dumps", "(", "fn_source", ")", "## encode as byte string", "hashed_fn_source", "=", "int", "(", "hashlib", ".", "sha1", "(", "pickled_fn_source", ")", ".", "hexdigest", "(", ")", ",", "16", ")", "%", "(", "10", "**", "11", ")", "# hash kwarg values", "kw_dict", "=", "dict", "(", "*", "*", "kwargs", ")", "kw_hash", "=", "list", "(", ")", "if", "not", "kw_dict", ":", "kw_hash", "=", "[", "\"default\"", "]", "else", ":", "[", "kw_hash", ".", "append", "(", "\"{}-{}\"", ".", "format", "(", "key", ",", "h", ")", ")", "for", "(", "key", ",", "h", ")", "in", "sorted", "(", "kw_dict", ".", "items", "(", ")", ")", "]", "# hash closure vars - for case where filter_fn is defined within closure of filter_fn", "closure", "=", "[", "]", "nonlocals", "=", "inspect", ".", "getclosurevars", "(", "filter_fn", ")", ".", "nonlocals", "for", "(", "key", ",", "val", ")", "in", "nonlocals", ".", "items", "(", ")", ":", "## capture hash for any function within closure", "if", "inspect", ".", "isfunction", "(", "val", ")", ":", "closure", ".", "append", "(", "self", ".", "_hash_filter_fn", "(", "val", ")", ")", "closure", ".", "sort", "(", ")", "# Sorted for file name consistency", "closure_str", "=", "\"null\"", "if", "len", "(", "closure", ")", "==", "0", "else", "\"-\"", ".", "join", "(", "closure", ")", "# construct final string comprising hashed components", "hashed_fn", "=", "\".\"", ".", "join", "(", "[", "\"-\"", ".", "join", "(", "[", "filter_fn_name", ",", "str", "(", "hashed_fn_source", ")", "]", ")", ",", "\".\"", ".", "join", "(", "kw_hash", ")", ",", "closure_str", "]", ")", "return", "hashed_fn" ]
Construct string representing state of filter_fn Used to cache filtered variants or effects uniquely depending on filter fn values
[ "Construct", "string", "representing", "state", "of", "filter_fn", "Used", "to", "cache", "filtered", "variants", "or", "effects", "uniquely", "depending", "on", "filter", "fn", "values" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L538-L570
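The `_hash_filter_fn` record above builds a cache key by hashing the filter function's source code, its keyword arguments, and any functions captured in its closure. Below is a minimal standalone sketch of the same idea using only `inspect` and `hashlib` (the real method uses dill and also recurses into closure variables); the helper name `hash_fn_source` and the example filter are made up for illustration.

```python
import hashlib
import inspect

def hash_fn_source(fn, **kwargs):
    """Build a short, stable cache key from a function's source and kwargs.

    Illustrative only: Cohort._hash_filter_fn additionally hashes functions
    captured in the filter's closure and fetches source via dill.
    """
    source = inspect.getsource(fn)
    digest = int(hashlib.sha1(source.encode("utf-8")).hexdigest(), 16) % (10 ** 11)
    if kwargs:
        kw_part = ".".join("{}-{}".format(k, v) for k, v in sorted(kwargs.items()))
    else:
        kw_part = "default"
    return "{}-{}.{}".format(fn.__name__, digest, kw_part)

def keep_all(variant):
    return True

print(hash_fn_source(keep_all))                 # e.g. keep_all-12345678901.default
print(hash_fn_source(keep_all, min_depth=30))   # kwargs become part of the key
```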
hammerlab/cohorts
cohorts/cohort.py
Cohort._load_single_patient_variants
def _load_single_patient_variants(self, patient, filter_fn, use_cache=True, **kwargs): """ Load filtered, merged variants for a single patient, optionally using cache Note that filtered variants are first merged before filtering, and each step is cached independently. Turn on debug statements for more details about cached files. Use `_load_single_patient_merged_variants` to see merged variants without filtering. """ if filter_fn is None: use_filtered_cache = False else: filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading variants for patient {} with filter_fn {}".format(patient.id, filter_fn_name)) use_filtered_cache = use_cache ## confirm that we can get cache-name (else don't use filtered cache) if use_filtered_cache: logger.debug("... identifying filtered-cache file name") try: ## try to load filtered variants from cache filtered_cache_file_name = "%s-variants.%s.pkl" % (self.merge_type, self._hash_filter_fn(filter_fn, **kwargs)) except: logger.warning("... error identifying filtered-cache file name for patient {}: {}".format( patient.id, filter_fn_name)) use_filtered_cache = False else: logger.debug("... trying to load filtered variants from cache: {}".format(filtered_cache_file_name)) try: cached = self.load_from_cache(self.cache_names["variant"], patient.id, filtered_cache_file_name) if cached is not None: return cached except: logger.warning("Error loading variants from cache for patient: {}".format(patient.id)) pass ## get merged variants logger.debug("... getting merged variants for: {}".format(patient.id)) merged_variants = self._load_single_patient_merged_variants(patient, use_cache=use_cache) # Note None here is different from 0. We want to preserve None if merged_variants is None: logger.info("Variants did not exist for patient %s" % patient.id) return None logger.debug("... applying filters to variants for: {}".format(patient.id)) filtered_variants = filter_variants(variant_collection=merged_variants, patient=patient, filter_fn=filter_fn, **kwargs) if use_filtered_cache: logger.debug("... saving filtered variants to cache: {}".format(filtered_cache_file_name)) self.save_to_cache(filtered_variants, self.cache_names["variant"], patient.id, filtered_cache_file_name) return filtered_variants
python
def _load_single_patient_variants(self, patient, filter_fn, use_cache=True, **kwargs): """ Load filtered, merged variants for a single patient, optionally using cache Note that filtered variants are first merged before filtering, and each step is cached independently. Turn on debug statements for more details about cached files. Use `_load_single_patient_merged_variants` to see merged variants without filtering. """ if filter_fn is None: use_filtered_cache = False else: filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading variants for patient {} with filter_fn {}".format(patient.id, filter_fn_name)) use_filtered_cache = use_cache ## confirm that we can get cache-name (else don't use filtered cache) if use_filtered_cache: logger.debug("... identifying filtered-cache file name") try: ## try to load filtered variants from cache filtered_cache_file_name = "%s-variants.%s.pkl" % (self.merge_type, self._hash_filter_fn(filter_fn, **kwargs)) except: logger.warning("... error identifying filtered-cache file name for patient {}: {}".format( patient.id, filter_fn_name)) use_filtered_cache = False else: logger.debug("... trying to load filtered variants from cache: {}".format(filtered_cache_file_name)) try: cached = self.load_from_cache(self.cache_names["variant"], patient.id, filtered_cache_file_name) if cached is not None: return cached except: logger.warning("Error loading variants from cache for patient: {}".format(patient.id)) pass ## get merged variants logger.debug("... getting merged variants for: {}".format(patient.id)) merged_variants = self._load_single_patient_merged_variants(patient, use_cache=use_cache) # Note None here is different from 0. We want to preserve None if merged_variants is None: logger.info("Variants did not exist for patient %s" % patient.id) return None logger.debug("... applying filters to variants for: {}".format(patient.id)) filtered_variants = filter_variants(variant_collection=merged_variants, patient=patient, filter_fn=filter_fn, **kwargs) if use_filtered_cache: logger.debug("... saving filtered variants to cache: {}".format(filtered_cache_file_name)) self.save_to_cache(filtered_variants, self.cache_names["variant"], patient.id, filtered_cache_file_name) return filtered_variants
[ "def", "_load_single_patient_variants", "(", "self", ",", "patient", ",", "filter_fn", ",", "use_cache", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "filter_fn", "is", "None", ":", "use_filtered_cache", "=", "False", "else", ":", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ")", "logger", ".", "debug", "(", "\"loading variants for patient {} with filter_fn {}\"", ".", "format", "(", "patient", ".", "id", ",", "filter_fn_name", ")", ")", "use_filtered_cache", "=", "use_cache", "## confirm that we can get cache-name (else don't use filtered cache)", "if", "use_filtered_cache", ":", "logger", ".", "debug", "(", "\"... identifying filtered-cache file name\"", ")", "try", ":", "## try to load filtered variants from cache", "filtered_cache_file_name", "=", "\"%s-variants.%s.pkl\"", "%", "(", "self", ".", "merge_type", ",", "self", ".", "_hash_filter_fn", "(", "filter_fn", ",", "*", "*", "kwargs", ")", ")", "except", ":", "logger", ".", "warning", "(", "\"... error identifying filtered-cache file name for patient {}: {}\"", ".", "format", "(", "patient", ".", "id", ",", "filter_fn_name", ")", ")", "use_filtered_cache", "=", "False", "else", ":", "logger", ".", "debug", "(", "\"... trying to load filtered variants from cache: {}\"", ".", "format", "(", "filtered_cache_file_name", ")", ")", "try", ":", "cached", "=", "self", ".", "load_from_cache", "(", "self", ".", "cache_names", "[", "\"variant\"", "]", ",", "patient", ".", "id", ",", "filtered_cache_file_name", ")", "if", "cached", "is", "not", "None", ":", "return", "cached", "except", ":", "logger", ".", "warning", "(", "\"Error loading variants from cache for patient: {}\"", ".", "format", "(", "patient", ".", "id", ")", ")", "pass", "## get merged variants", "logger", ".", "debug", "(", "\"... getting merged variants for: {}\"", ".", "format", "(", "patient", ".", "id", ")", ")", "merged_variants", "=", "self", ".", "_load_single_patient_merged_variants", "(", "patient", ",", "use_cache", "=", "use_cache", ")", "# Note None here is different from 0. We want to preserve None", "if", "merged_variants", "is", "None", ":", "logger", ".", "info", "(", "\"Variants did not exist for patient %s\"", "%", "patient", ".", "id", ")", "return", "None", "logger", ".", "debug", "(", "\"... applying filters to variants for: {}\"", ".", "format", "(", "patient", ".", "id", ")", ")", "filtered_variants", "=", "filter_variants", "(", "variant_collection", "=", "merged_variants", ",", "patient", "=", "patient", ",", "filter_fn", "=", "filter_fn", ",", "*", "*", "kwargs", ")", "if", "use_filtered_cache", ":", "logger", ".", "debug", "(", "\"... saving filtered variants to cache: {}\"", ".", "format", "(", "filtered_cache_file_name", ")", ")", "self", ".", "save_to_cache", "(", "filtered_variants", ",", "self", ".", "cache_names", "[", "\"variant\"", "]", ",", "patient", ".", "id", ",", "filtered_cache_file_name", ")", "return", "filtered_variants" ]
Load filtered, merged variants for a single patient, optionally using cache Note that filtered variants are first merged before filtering, and each step is cached independently. Turn on debug statements for more details about cached files. Use `_load_single_patient_merged_variants` to see merged variants without filtering.
[ "Load", "filtered", "merged", "variants", "for", "a", "single", "patient", "optionally", "using", "cache" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L572-L626
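`_load_single_patient_variants` follows a try-cache, then compute, then save-back pattern around the filtered variants. A self-contained sketch of that pattern with pickle files is below; the path and helper names are hypothetical and do not reflect the cohorts cache layout.

```python
import os
import pickle
import tempfile

def load_or_compute(cache_path, compute_fn):
    """Return a cached result if present, otherwise compute it and cache it."""
    if os.path.exists(cache_path):
        with open(cache_path, "rb") as f:
            return pickle.load(f)
    result = compute_fn()
    if result is not None:  # skip caching when there was nothing to compute
        with open(cache_path, "wb") as f:
            pickle.dump(result, f)
    return result

cache_file = os.path.join(tempfile.gettempdir(), "example-variants.pkl")
print(load_or_compute(cache_file, lambda: [1, 2, 3]))
```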
hammerlab/cohorts
cohorts/cohort.py
Cohort._load_single_patient_merged_variants
def _load_single_patient_merged_variants(self, patient, use_cache=True): """ Load merged variants for a single patient, optionally using cache Note that merged variants are not filtered. Use `_load_single_patient_variants` to get filtered variants """ logger.debug("loading merged variants for patient {}".format(patient.id)) no_variants = False try: # get merged-variants from cache if use_cache: ## load unfiltered variants into list of collections variant_cache_file_name = "%s-variants.pkl" % (self.merge_type) merged_variants = self.load_from_cache(self.cache_names["variant"], patient.id, variant_cache_file_name) if merged_variants is not None: return merged_variants # get variant collections from file variant_collections = [] optional_maf_cols = ["t_ref_count", "t_alt_count", "n_ref_count", "n_alt_count"] if self.additional_maf_cols is not None: optional_maf_cols.extend(self.additional_maf_cols) for patient_variants in patient.variants_list: if type(patient_variants) == str: if ".vcf" in patient_variants: try: variant_collections.append(varcode.load_vcf_fast(patient_variants)) # StopIteration is thrown for empty VCFs. For an empty VCF, don't append any variants, # and don't throw an error. But do record a warning, in case the StopIteration was # thrown for another reason. except StopIteration as e: logger.warning("Empty VCF (or possibly a VCF error) for patient {}: {}".format( patient.id, str(e))) elif ".maf" in patient_variants: # See variant_stats.maf_somatic_variant_stats variant_collections.append( varcode.load_maf( patient_variants, optional_cols=optional_maf_cols, encoding="latin-1")) else: raise ValueError("Don't know how to read %s" % patient_variants) elif type(patient_variants) == VariantCollection: variant_collections.append(patient_variants) else: raise ValueError("Don't know how to read %s" % patient_variants) # merge variant-collections if len(variant_collections) == 0: no_variants = True elif len(variant_collections) == 1: # There is nothing to merge variants = variant_collections[0] merged_variants = variants else: merged_variants = self._merge_variant_collections(variant_collections, self.merge_type) except IOError: no_variants = True # Note that this is the number of variant collections and not the number of # variants. 0 variants will lead to 0 neoantigens, for example, but 0 variant # collections will lead to NaN variants and neoantigens. if no_variants: print("Variants did not exist for patient %s" % patient.id) merged_variants = None # save merged variants to file if use_cache: self.save_to_cache(merged_variants, self.cache_names["variant"], patient.id, variant_cache_file_name) return merged_variants
python
def _load_single_patient_merged_variants(self, patient, use_cache=True): """ Load merged variants for a single patient, optionally using cache Note that merged variants are not filtered. Use `_load_single_patient_variants` to get filtered variants """ logger.debug("loading merged variants for patient {}".format(patient.id)) no_variants = False try: # get merged-variants from cache if use_cache: ## load unfiltered variants into list of collections variant_cache_file_name = "%s-variants.pkl" % (self.merge_type) merged_variants = self.load_from_cache(self.cache_names["variant"], patient.id, variant_cache_file_name) if merged_variants is not None: return merged_variants # get variant collections from file variant_collections = [] optional_maf_cols = ["t_ref_count", "t_alt_count", "n_ref_count", "n_alt_count"] if self.additional_maf_cols is not None: optional_maf_cols.extend(self.additional_maf_cols) for patient_variants in patient.variants_list: if type(patient_variants) == str: if ".vcf" in patient_variants: try: variant_collections.append(varcode.load_vcf_fast(patient_variants)) # StopIteration is thrown for empty VCFs. For an empty VCF, don't append any variants, # and don't throw an error. But do record a warning, in case the StopIteration was # thrown for another reason. except StopIteration as e: logger.warning("Empty VCF (or possibly a VCF error) for patient {}: {}".format( patient.id, str(e))) elif ".maf" in patient_variants: # See variant_stats.maf_somatic_variant_stats variant_collections.append( varcode.load_maf( patient_variants, optional_cols=optional_maf_cols, encoding="latin-1")) else: raise ValueError("Don't know how to read %s" % patient_variants) elif type(patient_variants) == VariantCollection: variant_collections.append(patient_variants) else: raise ValueError("Don't know how to read %s" % patient_variants) # merge variant-collections if len(variant_collections) == 0: no_variants = True elif len(variant_collections) == 1: # There is nothing to merge variants = variant_collections[0] merged_variants = variants else: merged_variants = self._merge_variant_collections(variant_collections, self.merge_type) except IOError: no_variants = True # Note that this is the number of variant collections and not the number of # variants. 0 variants will lead to 0 neoantigens, for example, but 0 variant # collections will lead to NaN variants and neoantigens. if no_variants: print("Variants did not exist for patient %s" % patient.id) merged_variants = None # save merged variants to file if use_cache: self.save_to_cache(merged_variants, self.cache_names["variant"], patient.id, variant_cache_file_name) return merged_variants
[ "def", "_load_single_patient_merged_variants", "(", "self", ",", "patient", ",", "use_cache", "=", "True", ")", ":", "logger", ".", "debug", "(", "\"loading merged variants for patient {}\"", ".", "format", "(", "patient", ".", "id", ")", ")", "no_variants", "=", "False", "try", ":", "# get merged-variants from cache", "if", "use_cache", ":", "## load unfiltered variants into list of collections", "variant_cache_file_name", "=", "\"%s-variants.pkl\"", "%", "(", "self", ".", "merge_type", ")", "merged_variants", "=", "self", ".", "load_from_cache", "(", "self", ".", "cache_names", "[", "\"variant\"", "]", ",", "patient", ".", "id", ",", "variant_cache_file_name", ")", "if", "merged_variants", "is", "not", "None", ":", "return", "merged_variants", "# get variant collections from file", "variant_collections", "=", "[", "]", "optional_maf_cols", "=", "[", "\"t_ref_count\"", ",", "\"t_alt_count\"", ",", "\"n_ref_count\"", ",", "\"n_alt_count\"", "]", "if", "self", ".", "additional_maf_cols", "is", "not", "None", ":", "optional_maf_cols", ".", "extend", "(", "self", ".", "additional_maf_cols", ")", "for", "patient_variants", "in", "patient", ".", "variants_list", ":", "if", "type", "(", "patient_variants", ")", "==", "str", ":", "if", "\".vcf\"", "in", "patient_variants", ":", "try", ":", "variant_collections", ".", "append", "(", "varcode", ".", "load_vcf_fast", "(", "patient_variants", ")", ")", "# StopIteration is thrown for empty VCFs. For an empty VCF, don't append any variants,", "# and don't throw an error. But do record a warning, in case the StopIteration was", "# thrown for another reason.", "except", "StopIteration", "as", "e", ":", "logger", ".", "warning", "(", "\"Empty VCF (or possibly a VCF error) for patient {}: {}\"", ".", "format", "(", "patient", ".", "id", ",", "str", "(", "e", ")", ")", ")", "elif", "\".maf\"", "in", "patient_variants", ":", "# See variant_stats.maf_somatic_variant_stats", "variant_collections", ".", "append", "(", "varcode", ".", "load_maf", "(", "patient_variants", ",", "optional_cols", "=", "optional_maf_cols", ",", "encoding", "=", "\"latin-1\"", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Don't know how to read %s\"", "%", "patient_variants", ")", "elif", "type", "(", "patient_variants", ")", "==", "VariantCollection", ":", "variant_collections", ".", "append", "(", "patient_variants", ")", "else", ":", "raise", "ValueError", "(", "\"Don't know how to read %s\"", "%", "patient_variants", ")", "# merge variant-collections", "if", "len", "(", "variant_collections", ")", "==", "0", ":", "no_variants", "=", "True", "elif", "len", "(", "variant_collections", ")", "==", "1", ":", "# There is nothing to merge", "variants", "=", "variant_collections", "[", "0", "]", "merged_variants", "=", "variants", "else", ":", "merged_variants", "=", "self", ".", "_merge_variant_collections", "(", "variant_collections", ",", "self", ".", "merge_type", ")", "except", "IOError", ":", "no_variants", "=", "True", "# Note that this is the number of variant collections and not the number of", "# variants. 
0 variants will lead to 0 neoantigens, for example, but 0 variant", "# collections will lead to NaN variants and neoantigens.", "if", "no_variants", ":", "print", "(", "\"Variants did not exist for patient %s\"", "%", "patient", ".", "id", ")", "merged_variants", "=", "None", "# save merged variants to file", "if", "use_cache", ":", "self", ".", "save_to_cache", "(", "merged_variants", ",", "self", ".", "cache_names", "[", "\"variant\"", "]", ",", "patient", ".", "id", ",", "variant_cache_file_name", ")", "return", "merged_variants" ]
Load merged variants for a single patient, optionally using cache Note that merged variants are not filtered. Use `_load_single_patient_variants` to get filtered variants
[ "Load", "merged", "variants", "for", "a", "single", "patient", "optionally", "using", "cache" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L628-L695
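The merged-variants loader dispatches on each entry of `patient.variants_list`: a path containing ".vcf", a path containing ".maf", or an already-built `VariantCollection`. The sketch below mirrors that branching with injectable stand-ins for the varcode loaders so it runs without varcode installed; the stand-in return values are placeholders.

```python
def load_variant_input(item,
                       load_vcf=lambda p: ("vcf", p),
                       load_maf=lambda p: ("maf", p)):
    """Dispatch a single variants_list entry the way the cohorts loader does.

    load_vcf / load_maf are injectable stand-ins for the varcode loaders.
    """
    if isinstance(item, str):
        if ".vcf" in item:
            return load_vcf(item)
        elif ".maf" in item:
            return load_maf(item)
        raise ValueError("Don't know how to read %s" % item)
    # in cohorts, a non-string entry is expected to be a varcode.VariantCollection
    return item

print(load_variant_input("patient1.vcf"))
print(load_variant_input("patient1.maf"))
```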
hammerlab/cohorts
cohorts/cohort.py
Cohort.load_polyphen_annotations
def load_polyphen_annotations(self, as_dataframe=False, filter_fn=None): """Load a dataframe containing polyphen2 annotations for all variants Parameters ---------- database_file : string, sqlite Path to the WHESS/Polyphen2 SQLite database. Can be downloaded and bunzip2"ed from http://bit.ly/208mlIU filter_fn : function Takes a FilterablePolyphen and returns a boolean. Only annotations returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- annotations Dictionary of patient_id to a DataFrame that contains annotations """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) patient_annotations = {} for patient in self: annotations = self._load_single_patient_polyphen( patient, filter_fn=filter_fn) if annotations is not None: annotations["patient_id"] = patient.id patient_annotations[patient.id] = annotations if as_dataframe: return pd.concat(patient_annotations.values()) return patient_annotations
python
def load_polyphen_annotations(self, as_dataframe=False, filter_fn=None): """Load a dataframe containing polyphen2 annotations for all variants Parameters ---------- database_file : string, sqlite Path to the WHESS/Polyphen2 SQLite database. Can be downloaded and bunzip2"ed from http://bit.ly/208mlIU filter_fn : function Takes a FilterablePolyphen and returns a boolean. Only annotations returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- annotations Dictionary of patient_id to a DataFrame that contains annotations """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) patient_annotations = {} for patient in self: annotations = self._load_single_patient_polyphen( patient, filter_fn=filter_fn) if annotations is not None: annotations["patient_id"] = patient.id patient_annotations[patient.id] = annotations if as_dataframe: return pd.concat(patient_annotations.values()) return patient_annotations
[ "def", "load_polyphen_annotations", "(", "self", ",", "as_dataframe", "=", "False", ",", "filter_fn", "=", "None", ")", ":", "filter_fn", "=", "first_not_none_param", "(", "[", "filter_fn", ",", "self", ".", "filter_fn", "]", ",", "no_filter", ")", "patient_annotations", "=", "{", "}", "for", "patient", "in", "self", ":", "annotations", "=", "self", ".", "_load_single_patient_polyphen", "(", "patient", ",", "filter_fn", "=", "filter_fn", ")", "if", "annotations", "is", "not", "None", ":", "annotations", "[", "\"patient_id\"", "]", "=", "patient", ".", "id", "patient_annotations", "[", "patient", ".", "id", "]", "=", "annotations", "if", "as_dataframe", ":", "return", "pd", ".", "concat", "(", "patient_annotations", ".", "values", "(", ")", ")", "return", "patient_annotations" ]
Load a dataframe containing polyphen2 annotations for all variants Parameters ---------- database_file : string, sqlite Path to the WHESS/Polyphen2 SQLite database. Can be downloaded and bunzip2"ed from http://bit.ly/208mlIU filter_fn : function Takes a FilterablePolyphen and returns a boolean. Only annotations returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- annotations Dictionary of patient_id to a DataFrame that contains annotations
[ "Load", "a", "dataframe", "containing", "polyphen2", "annotations", "for", "all", "variants" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L708-L738
hammerlab/cohorts
cohorts/cohort.py
Cohort.load_effects
def load_effects(self, patients=None, only_nonsynonymous=False, all_effects=False, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.EffectCollection Note that this only loads one effect per variant. Parameters ---------- patients : str, optional Filter to a subset of patients only_nonsynonymous : bool, optional If true, load only nonsynonymous effects, default False all_effects : bool, optional If true, return all effects rather than only the top-priority effect per variant filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- effects Dictionary of patient_id to varcode.EffectCollection """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading effects with filter_fn {}".format(filter_fn_name)) patient_effects = {} for patient in self.iter_patients(patients): effects = self._load_single_patient_effects( patient, only_nonsynonymous, all_effects, filter_fn, **kwargs) if effects is not None: patient_effects[patient.id] = effects return patient_effects
python
def load_effects(self, patients=None, only_nonsynonymous=False, all_effects=False, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.EffectCollection Note that this only loads one effect per variant. Parameters ---------- patients : str, optional Filter to a subset of patients only_nonsynonymous : bool, optional If true, load only nonsynonymous effects, default False all_effects : bool, optional If true, return all effects rather than only the top-priority effect per variant filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- effects Dictionary of patient_id to varcode.EffectCollection """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading effects with filter_fn {}".format(filter_fn_name)) patient_effects = {} for patient in self.iter_patients(patients): effects = self._load_single_patient_effects( patient, only_nonsynonymous, all_effects, filter_fn, **kwargs) if effects is not None: patient_effects[patient.id] = effects return patient_effects
[ "def", "load_effects", "(", "self", ",", "patients", "=", "None", ",", "only_nonsynonymous", "=", "False", ",", "all_effects", "=", "False", ",", "filter_fn", "=", "None", ",", "*", "*", "kwargs", ")", ":", "filter_fn", "=", "first_not_none_param", "(", "[", "filter_fn", ",", "self", ".", "filter_fn", "]", ",", "no_filter", ")", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ")", "logger", ".", "debug", "(", "\"loading effects with filter_fn {}\"", ".", "format", "(", "filter_fn_name", ")", ")", "patient_effects", "=", "{", "}", "for", "patient", "in", "self", ".", "iter_patients", "(", "patients", ")", ":", "effects", "=", "self", ".", "_load_single_patient_effects", "(", "patient", ",", "only_nonsynonymous", ",", "all_effects", ",", "filter_fn", ",", "*", "*", "kwargs", ")", "if", "effects", "is", "not", "None", ":", "patient_effects", "[", "patient", ".", "id", "]", "=", "effects", "return", "patient_effects" ]
Load a dictionary of patient_id to varcode.EffectCollection Note that this only loads one effect per variant. Parameters ---------- patients : str, optional Filter to a subset of patients only_nonsynonymous : bool, optional If true, load only nonsynonymous effects, default False all_effects : bool, optional If true, return all effects rather than only the top-priority effect per variant filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- effects Dictionary of patient_id to varcode.EffectCollection
[ "Load", "a", "dictionary", "of", "patient_id", "to", "varcode", ".", "EffectCollection" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L790-L822
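A hedged usage sketch for `load_effects`: it is not self-contained, since it assumes `cohort` is an already-constructed `cohorts.Cohort` with variant files on disk, and the trivial filter function is only there to show the expected signature (a FilterableEffect in, a boolean out).

```python
# Assumes `cohort` is an existing cohorts.Cohort instance; not runnable standalone.

def keep_everything(filterable_effect):
    # FilterableEffect -> bool predicate; this trivial one keeps every effect.
    return True

# One top-priority effect per variant, nonsynonymous only, keyed by patient id.
effects_by_patient = cohort.load_effects(
    only_nonsynonymous=True,
    filter_fn=keep_everything)

for patient_id, effects in effects_by_patient.items():
    print(patient_id, len(effects))
```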
hammerlab/cohorts
cohorts/cohort.py
Cohort.load_kallisto
def load_kallisto(self): """ Load Kallisto transcript quantification data for a cohort Parameters ---------- Returns ------- kallisto_data : Pandas dataframe Pandas dataframe with Kallisto data for all patients columns include patient_id, gene_name, est_counts """ kallisto_data = pd.concat( [self._load_single_patient_kallisto(patient) for patient in self], copy=False ) if self.kallisto_ensembl_version is None: raise ValueError("Required a kallisto_ensembl_version but none was specified") ensembl_release = cached_release(self.kallisto_ensembl_version) kallisto_data["gene_name"] = \ kallisto_data["target_id"].map(lambda t: ensembl_release.gene_name_of_transcript_id(t)) # sum counts across genes kallisto_data = \ kallisto_data.groupby(["patient_id", "gene_name"])[["est_counts"]].sum().reset_index() return kallisto_data
python
def load_kallisto(self): """ Load Kallisto transcript quantification data for a cohort Parameters ---------- Returns ------- kallisto_data : Pandas dataframe Pandas dataframe with Kallisto data for all patients columns include patient_id, gene_name, est_counts """ kallisto_data = pd.concat( [self._load_single_patient_kallisto(patient) for patient in self], copy=False ) if self.kallisto_ensembl_version is None: raise ValueError("Required a kallisto_ensembl_version but none was specified") ensembl_release = cached_release(self.kallisto_ensembl_version) kallisto_data["gene_name"] = \ kallisto_data["target_id"].map(lambda t: ensembl_release.gene_name_of_transcript_id(t)) # sum counts across genes kallisto_data = \ kallisto_data.groupby(["patient_id", "gene_name"])[["est_counts"]].sum().reset_index() return kallisto_data
[ "def", "load_kallisto", "(", "self", ")", ":", "kallisto_data", "=", "pd", ".", "concat", "(", "[", "self", ".", "_load_single_patient_kallisto", "(", "patient", ")", "for", "patient", "in", "self", "]", ",", "copy", "=", "False", ")", "if", "self", ".", "kallisto_ensembl_version", "is", "None", ":", "raise", "ValueError", "(", "\"Required a kallisto_ensembl_version but none was specified\"", ")", "ensembl_release", "=", "cached_release", "(", "self", ".", "kallisto_ensembl_version", ")", "kallisto_data", "[", "\"gene_name\"", "]", "=", "kallisto_data", "[", "\"target_id\"", "]", ".", "map", "(", "lambda", "t", ":", "ensembl_release", ".", "gene_name_of_transcript_id", "(", "t", ")", ")", "# sum counts across genes", "kallisto_data", "=", "kallisto_data", ".", "groupby", "(", "[", "\"patient_id\"", ",", "\"gene_name\"", "]", ")", "[", "[", "\"est_counts\"", "]", "]", ".", "sum", "(", ")", ".", "reset_index", "(", ")", "return", "kallisto_data" ]
Load Kallisto transcript quantification data for a cohort Parameters ---------- Returns ------- kallisto_data : Pandas dataframe Pandas dataframe with Kallisto data for all patients columns include patient_id, gene_name, est_counts
[ "Load", "Kallisto", "transcript", "quantification", "data", "for", "a", "cohort" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L865-L895
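The heart of `load_kallisto` is mapping transcript IDs to gene names and summing `est_counts` per patient and gene. A self-contained pandas sketch of that aggregation on toy rows is below; the plain dict stands in for pyensembl's `gene_name_of_transcript_id`.

```python
import pandas as pd

# toy transcript-level abundance rows, as if concatenated across patients
kallisto = pd.DataFrame({
    "patient_id": ["P1", "P1", "P1", "P2"],
    "target_id": ["ENST01", "ENST02", "ENST03", "ENST01"],
    "est_counts": [10.0, 5.0, 2.0, 7.0],
})

# stand-in for ensembl_release.gene_name_of_transcript_id
transcript_to_gene = {"ENST01": "GENE_A", "ENST02": "GENE_A", "ENST03": "GENE_B"}
kallisto["gene_name"] = kallisto["target_id"].map(transcript_to_gene)

# sum transcript counts up to gene level, per patient
gene_counts = (kallisto
               .groupby(["patient_id", "gene_name"])[["est_counts"]]
               .sum()
               .reset_index())
print(gene_counts)
```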
hammerlab/cohorts
cohorts/cohort.py
Cohort._load_single_patient_kallisto
def _load_single_patient_kallisto(self, patient): """ Load Kallisto gene quantification given a patient Parameters ---------- patient : Patient Returns ------- data: Pandas dataframe Pandas dataframe of sample's Kallisto data columns include patient_id, target_id, length, eff_length, est_counts, tpm """ data = pd.read_csv(patient.tumor_sample.kallisto_path, sep="\t") data["patient_id"] = patient.id return data
python
def _load_single_patient_kallisto(self, patient): """ Load Kallisto gene quantification given a patient Parameters ---------- patient : Patient Returns ------- data: Pandas dataframe Pandas dataframe of sample's Kallisto data columns include patient_id, target_id, length, eff_length, est_counts, tpm """ data = pd.read_csv(patient.tumor_sample.kallisto_path, sep="\t") data["patient_id"] = patient.id return data
[ "def", "_load_single_patient_kallisto", "(", "self", ",", "patient", ")", ":", "data", "=", "pd", ".", "read_csv", "(", "patient", ".", "tumor_sample", ".", "kallisto_path", ",", "sep", "=", "\"\\t\"", ")", "data", "[", "\"patient_id\"", "]", "=", "patient", ".", "id", "return", "data" ]
Load Kallisto gene quantification given a patient Parameters ---------- patient : Patient Returns ------- data: Pandas dataframe Pandas dataframe of sample's Kallisto data columns include patient_id, target_id, length, eff_length, est_counts, tpm
[ "Load", "Kallisto", "gene", "quantification", "given", "a", "patient" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L897-L913
hammerlab/cohorts
cohorts/cohort.py
Cohort.load_cufflinks
def load_cufflinks(self, filter_ok=True): """ Load a Cufflinks gene expression data for a cohort Parameters ---------- filter_ok : bool, optional If true, filter Cufflinks data to row with FPKM_status == "OK" Returns ------- cufflinks_data : Pandas dataframe Pandas dataframe with Cufflinks data for all patients columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi """ return \ pd.concat( [self._load_single_patient_cufflinks(patient, filter_ok) for patient in self], copy=False )
python
def load_cufflinks(self, filter_ok=True): """ Load a Cufflinks gene expression data for a cohort Parameters ---------- filter_ok : bool, optional If true, filter Cufflinks data to row with FPKM_status == "OK" Returns ------- cufflinks_data : Pandas dataframe Pandas dataframe with Cufflinks data for all patients columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi """ return \ pd.concat( [self._load_single_patient_cufflinks(patient, filter_ok) for patient in self], copy=False )
[ "def", "load_cufflinks", "(", "self", ",", "filter_ok", "=", "True", ")", ":", "return", "pd", ".", "concat", "(", "[", "self", ".", "_load_single_patient_cufflinks", "(", "patient", ",", "filter_ok", ")", "for", "patient", "in", "self", "]", ",", "copy", "=", "False", ")" ]
Load a Cufflinks gene expression data for a cohort Parameters ---------- filter_ok : bool, optional If true, filter Cufflinks data to row with FPKM_status == "OK" Returns ------- cufflinks_data : Pandas dataframe Pandas dataframe with Cufflinks data for all patients columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
[ "Load", "a", "Cufflinks", "gene", "expression", "data", "for", "a", "cohort" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L915-L934
hammerlab/cohorts
cohorts/cohort.py
Cohort._load_single_patient_cufflinks
def _load_single_patient_cufflinks(self, patient, filter_ok): """ Load Cufflinks gene quantification given a patient Parameters ---------- patient : Patient filter_ok : bool, optional If true, filter Cufflinks data to row with FPKM_status == "OK" Returns ------- data: Pandas dataframe Pandas dataframe of sample's Cufflinks data columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi """ data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t") data["patient_id"] = patient.id if filter_ok: # Filter to OK FPKM counts data = data[data["FPKM_status"] == "OK"] return data
python
def _load_single_patient_cufflinks(self, patient, filter_ok): """ Load Cufflinks gene quantification given a patient Parameters ---------- patient : Patient filter_ok : bool, optional If true, filter Cufflinks data to row with FPKM_status == "OK" Returns ------- data: Pandas dataframe Pandas dataframe of sample's Cufflinks data columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi """ data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t") data["patient_id"] = patient.id if filter_ok: # Filter to OK FPKM counts data = data[data["FPKM_status"] == "OK"] return data
[ "def", "_load_single_patient_cufflinks", "(", "self", ",", "patient", ",", "filter_ok", ")", ":", "data", "=", "pd", ".", "read_csv", "(", "patient", ".", "tumor_sample", ".", "cufflinks_path", ",", "sep", "=", "\"\\t\"", ")", "data", "[", "\"patient_id\"", "]", "=", "patient", ".", "id", "if", "filter_ok", ":", "# Filter to OK FPKM counts", "data", "=", "data", "[", "data", "[", "\"FPKM_status\"", "]", "==", "\"OK\"", "]", "return", "data" ]
Load Cufflinks gene quantification given a patient Parameters ---------- patient : Patient filter_ok : bool, optional If true, filter Cufflinks data to row with FPKM_status == "OK" Returns ------- data: Pandas dataframe Pandas dataframe of sample's Cufflinks data columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
[ "Load", "Cufflinks", "gene", "quantification", "given", "a", "patient" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L936-L958
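`_load_single_patient_cufflinks` reads a tab-separated FPKM tracking file, tags it with the patient id, and optionally keeps only rows whose `FPKM_status` is "OK". The sketch below reproduces that filter on an in-memory frame so it runs without a Cufflinks output file; the gene names are invented.

```python
import pandas as pd

cufflinks = pd.DataFrame({
    "gene_id": ["g1", "g2", "g3"],
    "gene_short_name": ["TP53", "KRAS", "EGFR"],
    "FPKM": [12.3, 0.0, 5.1],
    "FPKM_status": ["OK", "LOWDATA", "OK"],
})
cufflinks["patient_id"] = "P1"

filter_ok = True
if filter_ok:
    # keep only confidently quantified genes, as the cohorts loader does
    cufflinks = cufflinks[cufflinks["FPKM_status"] == "OK"]
print(cufflinks)
```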
hammerlab/cohorts
cohorts/cohort.py
Cohort.get_filtered_isovar_epitopes
def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff): """ Mostly replicates topiary.build_epitope_collection_from_binding_predictions Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset in order to figure out whether a variant is in the peptide because it only has the variant's offset into the full protein; but isovar gives us the variant's offset into the protein subsequence (dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer peptides generated by mhctools. """ mutant_binding_predictions = [] for binding_prediction in epitopes: peptide = binding_prediction.peptide peptide_offset = binding_prediction.offset isovar_row = dict(binding_prediction.source_sequence_key) is_mutant = contains_mutant_residues( peptide_start_in_protein=peptide_offset, peptide_length=len(peptide), mutation_start_in_protein=isovar_row["variant_aa_interval_start"], mutation_end_in_protein=isovar_row["variant_aa_interval_end"]) if is_mutant and binding_prediction.value <= ic50_cutoff: mutant_binding_predictions.append(binding_prediction) return EpitopeCollection(mutant_binding_predictions)
python
def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff): """ Mostly replicates topiary.build_epitope_collection_from_binding_predictions Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset in order to figure out whether a variant is in the peptide because it only has the variant's offset into the full protein; but isovar gives us the variant's offset into the protein subsequence (dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer peptides generated by mhctools. """ mutant_binding_predictions = [] for binding_prediction in epitopes: peptide = binding_prediction.peptide peptide_offset = binding_prediction.offset isovar_row = dict(binding_prediction.source_sequence_key) is_mutant = contains_mutant_residues( peptide_start_in_protein=peptide_offset, peptide_length=len(peptide), mutation_start_in_protein=isovar_row["variant_aa_interval_start"], mutation_end_in_protein=isovar_row["variant_aa_interval_end"]) if is_mutant and binding_prediction.value <= ic50_cutoff: mutant_binding_predictions.append(binding_prediction) return EpitopeCollection(mutant_binding_predictions)
[ "def", "get_filtered_isovar_epitopes", "(", "self", ",", "epitopes", ",", "ic50_cutoff", ")", ":", "mutant_binding_predictions", "=", "[", "]", "for", "binding_prediction", "in", "epitopes", ":", "peptide", "=", "binding_prediction", ".", "peptide", "peptide_offset", "=", "binding_prediction", ".", "offset", "isovar_row", "=", "dict", "(", "binding_prediction", ".", "source_sequence_key", ")", "is_mutant", "=", "contains_mutant_residues", "(", "peptide_start_in_protein", "=", "peptide_offset", ",", "peptide_length", "=", "len", "(", "peptide", ")", ",", "mutation_start_in_protein", "=", "isovar_row", "[", "\"variant_aa_interval_start\"", "]", ",", "mutation_end_in_protein", "=", "isovar_row", "[", "\"variant_aa_interval_end\"", "]", ")", "if", "is_mutant", "and", "binding_prediction", ".", "value", "<=", "ic50_cutoff", ":", "mutant_binding_predictions", ".", "append", "(", "binding_prediction", ")", "return", "EpitopeCollection", "(", "mutant_binding_predictions", ")" ]
Mostly replicates topiary.build_epitope_collection_from_binding_predictions Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset in order to figure out whether a variant is in the peptide because it only has the variant's offset into the full protein; but isovar gives us the variant's offset into the protein subsequence (dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer peptides generated by mhctools.
[ "Mostly", "replicates", "topiary", ".", "build_epitope_collection_from_binding_predictions" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1064-L1086
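`get_filtered_isovar_epitopes` keeps a binding prediction only when the peptide overlaps the mutant residues and its IC50 is at or below the cutoff. Below is a standalone interval-overlap check in the spirit of `contains_mutant_residues`; it is an illustrative re-implementation assuming half-open intervals, not the topiary function itself.

```python
def peptide_contains_mutation(peptide_start, peptide_length,
                              mutation_start, mutation_end):
    """True if the peptide interval [peptide_start, peptide_start + peptide_length)
    overlaps the mutant residue interval [mutation_start, mutation_end)."""
    peptide_end = peptide_start + peptide_length
    return peptide_start < mutation_end and mutation_start < peptide_end

# a 9-mer starting at protein offset 5, mutation at residues 10-11: overlaps
print(peptide_contains_mutation(5, 9, 10, 11))   # True
# same 9-mer, mutation well downstream: no overlap
print(peptide_contains_mutation(5, 9, 30, 31))   # False
```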
hammerlab/cohorts
cohorts/cohort.py
Cohort.plot_roc_curve
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs): """Plot an ROC curve for benefit and a given variable Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` bootstrap_samples : int, optional Number of boostrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_auc_score, plot): (float, matplotlib plot) Returns the average AUC for the given predictor over `bootstrap_samples` and the associated ROC curve """ plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs) df = filter_not_null(df, "benefit") df = filter_not_null(df, plot_col) df.benefit = df.benefit.astype(bool) return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
python
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs): """Plot an ROC curve for benefit and a given variable Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` bootstrap_samples : int, optional Number of boostrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_auc_score, plot): (float, matplotlib plot) Returns the average AUC for the given predictor over `bootstrap_samples` and the associated ROC curve """ plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs) df = filter_not_null(df, "benefit") df = filter_not_null(df, plot_col) df.benefit = df.benefit.astype(bool) return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
[ "def", "plot_roc_curve", "(", "self", ",", "on", ",", "bootstrap_samples", "=", "100", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "plot_col", ",", "df", "=", "self", ".", "as_dataframe", "(", "on", ",", "return_cols", "=", "True", ",", "*", "*", "kwargs", ")", "df", "=", "filter_not_null", "(", "df", ",", "\"benefit\"", ")", "df", "=", "filter_not_null", "(", "df", ",", "plot_col", ")", "df", ".", "benefit", "=", "df", ".", "benefit", ".", "astype", "(", "bool", ")", "return", "roc_curve_plot", "(", "df", ",", "plot_col", ",", "\"benefit\"", ",", "bootstrap_samples", ",", "ax", "=", "ax", ")" ]
Plot an ROC curve for benefit and a given variable Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` bootstrap_samples : int, optional Number of boostrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_auc_score, plot): (float, matplotlib plot) Returns the average AUC for the given predictor over `bootstrap_samples` and the associated ROC curve
[ "Plot", "an", "ROC", "curve", "for", "benefit", "and", "a", "given", "variable" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1171-L1193
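`plot_roc_curve` reports a bootstrapped average AUC of a predictor against the boolean `benefit` column. The self-contained sketch below shows one plausible bootstrap-AUC loop with scikit-learn on synthetic data; it is a guess at the idea, not a reproduction of `roc_curve_plot`.

```python
import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.RandomState(0)
benefit = rng.randint(0, 2, size=200).astype(bool)           # toy response labels
score = benefit.astype(float) + rng.normal(scale=1.0, size=200)  # noisy predictor

n_bootstrap = 100
aucs = []
for _ in range(n_bootstrap):
    idx = rng.randint(0, len(score), size=len(score))  # resample with replacement
    if benefit[idx].all() or (~benefit[idx]).all():
        continue  # AUC is undefined if only one class was drawn
    aucs.append(roc_auc_score(benefit[idx], score[idx]))
print("mean bootstrap AUC: %.3f" % np.mean(aucs))
```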
hammerlab/cohorts
cohorts/cohort.py
Cohort.plot_benefit
def plot_benefit(self, on, benefit_col="benefit", label="Response", ax=None, alternative="two-sided", boolean_value_map={}, order=None, **kwargs): """Plot a comparison of benefit/response in the cohort on a given variable """ no_benefit_plot_name = "No %s" % self.benefit_plot_name boolean_value_map = boolean_value_map or {True: self.benefit_plot_name, False: no_benefit_plot_name} order = order or [no_benefit_plot_name, self.benefit_plot_name] return self.plot_boolean(on=on, boolean_col=benefit_col, alternative=alternative, boolean_label=label, boolean_value_map=boolean_value_map, order=order, ax=ax, **kwargs)
python
def plot_benefit(self, on, benefit_col="benefit", label="Response", ax=None, alternative="two-sided", boolean_value_map={}, order=None, **kwargs): """Plot a comparison of benefit/response in the cohort on a given variable """ no_benefit_plot_name = "No %s" % self.benefit_plot_name boolean_value_map = boolean_value_map or {True: self.benefit_plot_name, False: no_benefit_plot_name} order = order or [no_benefit_plot_name, self.benefit_plot_name] return self.plot_boolean(on=on, boolean_col=benefit_col, alternative=alternative, boolean_label=label, boolean_value_map=boolean_value_map, order=order, ax=ax, **kwargs)
[ "def", "plot_benefit", "(", "self", ",", "on", ",", "benefit_col", "=", "\"benefit\"", ",", "label", "=", "\"Response\"", ",", "ax", "=", "None", ",", "alternative", "=", "\"two-sided\"", ",", "boolean_value_map", "=", "{", "}", ",", "order", "=", "None", ",", "*", "*", "kwargs", ")", ":", "no_benefit_plot_name", "=", "\"No %s\"", "%", "self", ".", "benefit_plot_name", "boolean_value_map", "=", "boolean_value_map", "or", "{", "True", ":", "self", ".", "benefit_plot_name", ",", "False", ":", "no_benefit_plot_name", "}", "order", "=", "order", "or", "[", "no_benefit_plot_name", ",", "self", ".", "benefit_plot_name", "]", "return", "self", ".", "plot_boolean", "(", "on", "=", "on", ",", "boolean_col", "=", "benefit_col", ",", "alternative", "=", "alternative", ",", "boolean_label", "=", "label", ",", "boolean_value_map", "=", "boolean_value_map", ",", "order", "=", "order", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")" ]
Plot a comparison of benefit/response in the cohort on a given variable
[ "Plot", "a", "comparison", "of", "benefit", "/", "response", "in", "the", "cohort", "on", "a", "given", "variable" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1195-L1211
hammerlab/cohorts
cohorts/cohort.py
Cohort.plot_boolean
def plot_boolean(self, on, boolean_col, plot_col=None, boolean_label=None, boolean_value_map={}, order=None, ax=None, alternative="two-sided", **kwargs): """Plot a comparison of `boolean_col` in the cohort on a given variable via `on` or `col`. If the variable (through `on` or `col`) is binary this will compare odds-ratios and perform a Fisher's exact test. If the variable is numeric, this will compare the distributions through a Mann-Whitney test and plot the distributions with box-strip plot Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` plot_col : str, optional If on has many columns, this is the one whose values we are plotting. If on has a single column, this is unnecessary. We might want many columns if, e.g. we're generating boolean_col from a function as well. boolean_col : str Column name of boolean column to plot or compare against. boolean_label : None, optional Label to give boolean column in the plot boolean_value_map : dict, optional Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'} order : None, optional Order of the labels on the x-axis ax : None, optional Axes to plot on alternative : str, optional Choose the sidedness of the mannwhitneyu or Fisher's Exact test. Returns ------- (Test statistic, p-value): (float, float) """ cols, df = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, plot_col=plot_col) df = filter_not_null(df, boolean_col) df = filter_not_null(df, plot_col) if boolean_label: df[boolean_label] = df[boolean_col] boolean_col = boolean_label condition_value = None if boolean_value_map: assert set(boolean_value_map.keys()) == set([True, False]), \ "Improper mapping of boolean column provided" df[boolean_col] = df[boolean_col].map(lambda v: boolean_value_map[v]) condition_value = boolean_value_map[True] if df[plot_col].dtype == "bool": results = fishers_exact_plot( data=df, condition1=boolean_col, condition2=plot_col, condition1_value=condition_value, alternative=alternative, order=order, ax=ax) else: results = mann_whitney_plot( data=df, condition=boolean_col, distribution=plot_col, condition_value=condition_value, alternative=alternative, order=order, ax=ax) return results
python
def plot_boolean(self, on, boolean_col, plot_col=None, boolean_label=None, boolean_value_map={}, order=None, ax=None, alternative="two-sided", **kwargs): """Plot a comparison of `boolean_col` in the cohort on a given variable via `on` or `col`. If the variable (through `on` or `col`) is binary this will compare odds-ratios and perform a Fisher's exact test. If the variable is numeric, this will compare the distributions through a Mann-Whitney test and plot the distributions with box-strip plot Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` plot_col : str, optional If on has many columns, this is the one whose values we are plotting. If on has a single column, this is unnecessary. We might want many columns if, e.g. we're generating boolean_col from a function as well. boolean_col : str Column name of boolean column to plot or compare against. boolean_label : None, optional Label to give boolean column in the plot boolean_value_map : dict, optional Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'} order : None, optional Order of the labels on the x-axis ax : None, optional Axes to plot on alternative : str, optional Choose the sidedness of the mannwhitneyu or Fisher's Exact test. Returns ------- (Test statistic, p-value): (float, float) """ cols, df = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, plot_col=plot_col) df = filter_not_null(df, boolean_col) df = filter_not_null(df, plot_col) if boolean_label: df[boolean_label] = df[boolean_col] boolean_col = boolean_label condition_value = None if boolean_value_map: assert set(boolean_value_map.keys()) == set([True, False]), \ "Improper mapping of boolean column provided" df[boolean_col] = df[boolean_col].map(lambda v: boolean_value_map[v]) condition_value = boolean_value_map[True] if df[plot_col].dtype == "bool": results = fishers_exact_plot( data=df, condition1=boolean_col, condition2=plot_col, condition1_value=condition_value, alternative=alternative, order=order, ax=ax) else: results = mann_whitney_plot( data=df, condition=boolean_col, distribution=plot_col, condition_value=condition_value, alternative=alternative, order=order, ax=ax) return results
[ "def", "plot_boolean", "(", "self", ",", "on", ",", "boolean_col", ",", "plot_col", "=", "None", ",", "boolean_label", "=", "None", ",", "boolean_value_map", "=", "{", "}", ",", "order", "=", "None", ",", "ax", "=", "None", ",", "alternative", "=", "\"two-sided\"", ",", "*", "*", "kwargs", ")", ":", "cols", ",", "df", "=", "self", ".", "as_dataframe", "(", "on", ",", "return_cols", "=", "True", ",", "*", "*", "kwargs", ")", "plot_col", "=", "self", ".", "plot_col_from_cols", "(", "cols", "=", "cols", ",", "plot_col", "=", "plot_col", ")", "df", "=", "filter_not_null", "(", "df", ",", "boolean_col", ")", "df", "=", "filter_not_null", "(", "df", ",", "plot_col", ")", "if", "boolean_label", ":", "df", "[", "boolean_label", "]", "=", "df", "[", "boolean_col", "]", "boolean_col", "=", "boolean_label", "condition_value", "=", "None", "if", "boolean_value_map", ":", "assert", "set", "(", "boolean_value_map", ".", "keys", "(", ")", ")", "==", "set", "(", "[", "True", ",", "False", "]", ")", ",", "\"Improper mapping of boolean column provided\"", "df", "[", "boolean_col", "]", "=", "df", "[", "boolean_col", "]", ".", "map", "(", "lambda", "v", ":", "boolean_value_map", "[", "v", "]", ")", "condition_value", "=", "boolean_value_map", "[", "True", "]", "if", "df", "[", "plot_col", "]", ".", "dtype", "==", "\"bool\"", ":", "results", "=", "fishers_exact_plot", "(", "data", "=", "df", ",", "condition1", "=", "boolean_col", ",", "condition2", "=", "plot_col", ",", "condition1_value", "=", "condition_value", ",", "alternative", "=", "alternative", ",", "order", "=", "order", ",", "ax", "=", "ax", ")", "else", ":", "results", "=", "mann_whitney_plot", "(", "data", "=", "df", ",", "condition", "=", "boolean_col", ",", "distribution", "=", "plot_col", ",", "condition_value", "=", "condition_value", ",", "alternative", "=", "alternative", ",", "order", "=", "order", ",", "ax", "=", "ax", ")", "return", "results" ]
Plot a comparison of `boolean_col` in the cohort on a given variable via `on` or `col`. If the variable (through `on` or `col`) is binary this will compare odds-ratios and perform a Fisher's exact test. If the variable is numeric, this will compare the distributions through a Mann-Whitney test and plot the distributions with box-strip plot Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` plot_col : str, optional If on has many columns, this is the one whose values we are plotting. If on has a single column, this is unnecessary. We might want many columns if, e.g. we're generating boolean_col from a function as well. boolean_col : str Column name of boolean column to plot or compare against. boolean_label : None, optional Label to give boolean column in the plot boolean_value_map : dict, optional Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'} order : None, optional Order of the labels on the x-axis ax : None, optional Axes to plot on alternative : str, optional Choose the sidedness of the mannwhitneyu or Fisher's Exact test. Returns ------- (Test statistic, p-value): (float, float)
[ "Plot", "a", "comparison", "of", "boolean_col", "in", "the", "cohort", "on", "a", "given", "variable", "via", "on", "or", "col", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1213-L1293
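`plot_boolean` picks its test from the dtype of the plotted column: boolean columns get a Fisher's exact comparison, numeric columns get a Mann-Whitney comparison. The scipy sketch below shows that dispatch on toy data, with the plotting left out; the column names are invented.

```python
import pandas as pd
from scipy.stats import fisher_exact, mannwhitneyu

df = pd.DataFrame({
    "benefit":   [True, True, False, False, True, False, True, False],
    "high_tmb":  [True, False, False, False, True, False, True, False],  # boolean variable
    "mutations": [120, 80, 30, 25, 200, 40, 150, 20],                    # numeric variable
})

def compare(df, boolean_col, plot_col, alternative="two-sided"):
    if df[plot_col].dtype == bool:
        # 2x2 contingency table -> Fisher's exact test
        table = pd.crosstab(df[boolean_col], df[plot_col])
        return fisher_exact(table, alternative=alternative)
    # numeric column -> compare distributions between the two groups
    groups = [vals[plot_col] for _, vals in df.groupby(boolean_col)]
    return mannwhitneyu(groups[0], groups[1], alternative=alternative)

print(compare(df, "benefit", "high_tmb"))    # (odds ratio, p-value)
print(compare(df, "benefit", "mutations"))   # (U statistic, p-value)
```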
hammerlab/cohorts
cohorts/cohort.py
Cohort.plot_survival
def plot_survival(self, on, how="os", survival_units="Days", strata=None, ax=None, ci_show=False, with_condition_color="#B38600", no_condition_color="#A941AC", with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette="Set2", threshold=None, **kwargs): """Plot a Kaplan Meier survival curve by splitting the cohort into two groups Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` how : {"os", "pfs"}, optional Whether to plot OS (overall survival) or PFS (progression free survival) survival_units : str Unit of time for the survival measure, i.e. Days or Months strata : str (optional) column name of stratifying variable ci_show : bool Display the confidence interval around the survival curve threshold : int, "median", "median-per-strata" or None (optional) Threshold of `col` on which to split the cohort """ assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how cols, df = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True) df = filter_not_null(df, plot_col) results = plot_kmf( df=df, condition_col=plot_col, xlabel=survival_units, ylabel="Overall Survival (%)" if how == "os" else "Progression-Free Survival (%)", censor_col="deceased" if how == "os" else "progressed_or_deceased", survival_col=how, strata_col=strata, threshold=threshold, ax=ax, ci_show=ci_show, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_palette=color_palette, label_map=label_map, color_map=color_map, ) return results
python
def plot_survival(self, on, how="os", survival_units="Days", strata=None, ax=None, ci_show=False, with_condition_color="#B38600", no_condition_color="#A941AC", with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette="Set2", threshold=None, **kwargs): """Plot a Kaplan Meier survival curve by splitting the cohort into two groups Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` how : {"os", "pfs"}, optional Whether to plot OS (overall survival) or PFS (progression free survival) survival_units : str Unit of time for the survival measure, i.e. Days or Months strata : str (optional) column name of stratifying variable ci_show : bool Display the confidence interval around the survival curve threshold : int, "median", "median-per-strata" or None (optional) Threshold of `col` on which to split the cohort """ assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how cols, df = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True) df = filter_not_null(df, plot_col) results = plot_kmf( df=df, condition_col=plot_col, xlabel=survival_units, ylabel="Overall Survival (%)" if how == "os" else "Progression-Free Survival (%)", censor_col="deceased" if how == "os" else "progressed_or_deceased", survival_col=how, strata_col=strata, threshold=threshold, ax=ax, ci_show=ci_show, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_palette=color_palette, label_map=label_map, color_map=color_map, ) return results
[ "def", "plot_survival", "(", "self", ",", "on", ",", "how", "=", "\"os\"", ",", "survival_units", "=", "\"Days\"", ",", "strata", "=", "None", ",", "ax", "=", "None", ",", "ci_show", "=", "False", ",", "with_condition_color", "=", "\"#B38600\"", ",", "no_condition_color", "=", "\"#A941AC\"", ",", "with_condition_label", "=", "None", ",", "no_condition_label", "=", "None", ",", "color_map", "=", "None", ",", "label_map", "=", "None", ",", "color_palette", "=", "\"Set2\"", ",", "threshold", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert", "how", "in", "[", "\"os\"", ",", "\"pfs\"", "]", ",", "\"Invalid choice of survival plot type %s\"", "%", "how", "cols", ",", "df", "=", "self", ".", "as_dataframe", "(", "on", ",", "return_cols", "=", "True", ",", "*", "*", "kwargs", ")", "plot_col", "=", "self", ".", "plot_col_from_cols", "(", "cols", "=", "cols", ",", "only_allow_one", "=", "True", ")", "df", "=", "filter_not_null", "(", "df", ",", "plot_col", ")", "results", "=", "plot_kmf", "(", "df", "=", "df", ",", "condition_col", "=", "plot_col", ",", "xlabel", "=", "survival_units", ",", "ylabel", "=", "\"Overall Survival (%)\"", "if", "how", "==", "\"os\"", "else", "\"Progression-Free Survival (%)\"", ",", "censor_col", "=", "\"deceased\"", "if", "how", "==", "\"os\"", "else", "\"progressed_or_deceased\"", ",", "survival_col", "=", "how", ",", "strata_col", "=", "strata", ",", "threshold", "=", "threshold", ",", "ax", "=", "ax", ",", "ci_show", "=", "ci_show", ",", "with_condition_color", "=", "with_condition_color", ",", "no_condition_color", "=", "no_condition_color", ",", "with_condition_label", "=", "with_condition_label", ",", "no_condition_label", "=", "no_condition_label", ",", "color_palette", "=", "color_palette", ",", "label_map", "=", "label_map", ",", "color_map", "=", "color_map", ",", ")", "return", "results" ]
Plot a Kaplan Meier survival curve by splitting the cohort into two groups Parameters ---------- on : str or function or list or dict See `cohort.load.as_dataframe` how : {"os", "pfs"}, optional Whether to plot OS (overall survival) or PFS (progression free survival) survival_units : str Unit of time for the survival measure, i.e. Days or Months strata : str (optional) column name of stratifying variable ci_show : bool Display the confidence interval around the survival curve threshold : int, "median", "median-per-strata" or None (optional) Threshold of `col` on which to split the cohort
[ "Plot", "a", "Kaplan", "Meier", "survival", "curve", "by", "splitting", "the", "cohort", "into", "two", "groups", "Parameters", "----------", "on", ":", "str", "or", "function", "or", "list", "or", "dict", "See", "cohort", ".", "load", ".", "as_dataframe", "how", ":", "{", "os", "pfs", "}", "optional", "Whether", "to", "plot", "OS", "(", "overall", "survival", ")", "or", "PFS", "(", "progression", "free", "survival", ")", "survival_units", ":", "str", "Unit", "of", "time", "for", "the", "survival", "measure", "i", ".", "e", ".", "Days", "or", "Months", "strata", ":", "str", "(", "optional", ")", "column", "name", "of", "stratifying", "variable", "ci_show", ":", "bool", "Display", "the", "confidence", "interval", "around", "the", "survival", "curve", "threshold", ":", "int", "median", "median", "-", "per", "-", "strata", "or", "None", "(", "optional", ")", "Threshold", "of", "col", "on", "which", "to", "split", "the", "cohort" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1295-L1349
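A minimal usage sketch (not part of this record): it assumes `cohort` is an already-built cohorts.Cohort and that "snv_count" is a column or function understood by `as_dataframe`; both names are illustrative assumptions, not values taken from the source.

    # Hypothetical call; "snv_count" and the threshold choice are assumptions.
    results = cohort.plot_survival(
        on="snv_count",          # feature used to split the cohort into two groups
        how="os",                # overall survival (use "pfs" for progression-free)
        survival_units="Days",
        threshold="median",      # split at the median value of the feature
        ci_show=True,            # draw confidence bands around each curve
    )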
hammerlab/cohorts
cohorts/cohort.py
Cohort.plot_correlation
def plot_correlation(self, on, x_col=None, plot_type="jointplot", stat_func=pearsonr, show_stat_func=True, plot_kwargs={}, **kwargs): """Plot the correlation between two variables. Parameters ---------- on : list or dict of functions or strings See `cohort.load.as_dataframe` x_col : str, optional If `on` is a dict, this guarantees we have the expected ordering. plot_type : str, optional Specify "jointplot", "regplot", "boxplot", or "barplot". stat_func : function, optional. Specify which function to use for the statistical test. show_stat_func : bool, optional Whether or not to show the stat_func result in the plot itself. plot_kwargs : dict, optional kwargs to pass through to plotting functions. """ if plot_type not in ["boxplot", "barplot", "jointplot", "regplot"]: raise ValueError("Invalid plot_type %s" % plot_type) plot_cols, df = self.as_dataframe(on, return_cols=True, **kwargs) if len(plot_cols) != 2: raise ValueError("Must be comparing two columns, but there are %d columns" % len(plot_cols)) for plot_col in plot_cols: df = filter_not_null(df, plot_col) if x_col is None: x_col = plot_cols[0] y_col = plot_cols[1] else: if x_col == plot_cols[0]: y_col = plot_cols[1] else: y_col = plot_cols[0] series_x = df[x_col] series_y = df[y_col] coeff, p_value = stat_func(series_x, series_y) if plot_type == "jointplot": plot = sb.jointplot(data=df, x=x_col, y=y_col, stat_func=stat_func if show_stat_func else None, **plot_kwargs) elif plot_type == "regplot": plot = sb.regplot(data=df, x=x_col, y=y_col, **plot_kwargs) elif plot_type == "boxplot": plot = stripboxplot(data=df, x=x_col, y=y_col, **plot_kwargs) else: plot = sb.barplot(data=df, x=x_col, y=y_col, **plot_kwargs) return CorrelationResults(coeff=coeff, p_value=p_value, stat_func=stat_func, series_x=series_x, series_y=series_y, plot=plot)
python
def plot_correlation(self, on, x_col=None, plot_type="jointplot", stat_func=pearsonr, show_stat_func=True, plot_kwargs={}, **kwargs): """Plot the correlation between two variables. Parameters ---------- on : list or dict of functions or strings See `cohort.load.as_dataframe` x_col : str, optional If `on` is a dict, this guarantees we have the expected ordering. plot_type : str, optional Specify "jointplot", "regplot", "boxplot", or "barplot". stat_func : function, optional. Specify which function to use for the statistical test. show_stat_func : bool, optional Whether or not to show the stat_func result in the plot itself. plot_kwargs : dict, optional kwargs to pass through to plotting functions. """ if plot_type not in ["boxplot", "barplot", "jointplot", "regplot"]: raise ValueError("Invalid plot_type %s" % plot_type) plot_cols, df = self.as_dataframe(on, return_cols=True, **kwargs) if len(plot_cols) != 2: raise ValueError("Must be comparing two columns, but there are %d columns" % len(plot_cols)) for plot_col in plot_cols: df = filter_not_null(df, plot_col) if x_col is None: x_col = plot_cols[0] y_col = plot_cols[1] else: if x_col == plot_cols[0]: y_col = plot_cols[1] else: y_col = plot_cols[0] series_x = df[x_col] series_y = df[y_col] coeff, p_value = stat_func(series_x, series_y) if plot_type == "jointplot": plot = sb.jointplot(data=df, x=x_col, y=y_col, stat_func=stat_func if show_stat_func else None, **plot_kwargs) elif plot_type == "regplot": plot = sb.regplot(data=df, x=x_col, y=y_col, **plot_kwargs) elif plot_type == "boxplot": plot = stripboxplot(data=df, x=x_col, y=y_col, **plot_kwargs) else: plot = sb.barplot(data=df, x=x_col, y=y_col, **plot_kwargs) return CorrelationResults(coeff=coeff, p_value=p_value, stat_func=stat_func, series_x=series_x, series_y=series_y, plot=plot)
[ "def", "plot_correlation", "(", "self", ",", "on", ",", "x_col", "=", "None", ",", "plot_type", "=", "\"jointplot\"", ",", "stat_func", "=", "pearsonr", ",", "show_stat_func", "=", "True", ",", "plot_kwargs", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "if", "plot_type", "not", "in", "[", "\"boxplot\"", ",", "\"barplot\"", ",", "\"jointplot\"", ",", "\"regplot\"", "]", ":", "raise", "ValueError", "(", "\"Invalid plot_type %s\"", "%", "plot_type", ")", "plot_cols", ",", "df", "=", "self", ".", "as_dataframe", "(", "on", ",", "return_cols", "=", "True", ",", "*", "*", "kwargs", ")", "if", "len", "(", "plot_cols", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"Must be comparing two columns, but there are %d columns\"", "%", "len", "(", "plot_cols", ")", ")", "for", "plot_col", "in", "plot_cols", ":", "df", "=", "filter_not_null", "(", "df", ",", "plot_col", ")", "if", "x_col", "is", "None", ":", "x_col", "=", "plot_cols", "[", "0", "]", "y_col", "=", "plot_cols", "[", "1", "]", "else", ":", "if", "x_col", "==", "plot_cols", "[", "0", "]", ":", "y_col", "=", "plot_cols", "[", "1", "]", "else", ":", "y_col", "=", "plot_cols", "[", "0", "]", "series_x", "=", "df", "[", "x_col", "]", "series_y", "=", "df", "[", "y_col", "]", "coeff", ",", "p_value", "=", "stat_func", "(", "series_x", ",", "series_y", ")", "if", "plot_type", "==", "\"jointplot\"", ":", "plot", "=", "sb", ".", "jointplot", "(", "data", "=", "df", ",", "x", "=", "x_col", ",", "y", "=", "y_col", ",", "stat_func", "=", "stat_func", "if", "show_stat_func", "else", "None", ",", "*", "*", "plot_kwargs", ")", "elif", "plot_type", "==", "\"regplot\"", ":", "plot", "=", "sb", ".", "regplot", "(", "data", "=", "df", ",", "x", "=", "x_col", ",", "y", "=", "y_col", ",", "*", "*", "plot_kwargs", ")", "elif", "plot_type", "==", "\"boxplot\"", ":", "plot", "=", "stripboxplot", "(", "data", "=", "df", ",", "x", "=", "x_col", ",", "y", "=", "y_col", ",", "*", "*", "plot_kwargs", ")", "else", ":", "plot", "=", "sb", ".", "barplot", "(", "data", "=", "df", ",", "x", "=", "x_col", ",", "y", "=", "y_col", ",", "*", "*", "plot_kwargs", ")", "return", "CorrelationResults", "(", "coeff", "=", "coeff", ",", "p_value", "=", "p_value", ",", "stat_func", "=", "stat_func", ",", "series_x", "=", "series_x", ",", "series_y", "=", "series_y", ",", "plot", "=", "plot", ")" ]
Plot the correlation between two variables. Parameters ---------- on : list or dict of functions or strings See `cohort.load.as_dataframe` x_col : str, optional If `on` is a dict, this guarantees we have the expected ordering. plot_type : str, optional Specify "jointplot", "regplot", "boxplot", or "barplot". stat_func : function, optional. Specify which function to use for the statistical test. show_stat_func : bool, optional Whether or not to show the stat_func result in the plot itself. plot_kwargs : dict, optional kwargs to pass through to plotting functions.
[ "Plot", "the", "correlation", "between", "two", "variables", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1351-L1399
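A hedged usage sketch: both column names below are assumptions and `cohort` is assumed to be an existing cohorts.Cohort; the attributes read back are the fields named in CorrelationResults above.

    # Hypothetical call comparing two assumed numeric columns.
    corr = cohort.plot_correlation(
        on=["snv_count", "neoantigen_count"],  # exactly two columns are required
        plot_type="regplot",
    )
    print(corr.coeff, corr.p_value)            # Pearson r and p-value by default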
hammerlab/cohorts
cohorts/cohort.py
Cohort._list_patient_ids
def _list_patient_ids(self): """ Utility function to return a list of patient ids in the Cohort """ results = [] for patient in self: results.append(patient.id) return(results)
python
def _list_patient_ids(self): """ Utility function to return a list of patient ids in the Cohort """ results = [] for patient in self: results.append(patient.id) return(results)
[ "def", "_list_patient_ids", "(", "self", ")", ":", "results", "=", "[", "]", "for", "patient", "in", "self", ":", "results", ".", "append", "(", "patient", ".", "id", ")", "return", "(", "results", ")" ]
Utility function to return a list of patient ids in the Cohort
[ "Utility", "function", "to", "return", "a", "list", "of", "patient", "ids", "in", "the", "Cohort" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1410-L1416
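Because the method simply iterates over the cohort and collects ids, the same result can be written as a one-line comprehension; this is an equivalent sketch for illustration, not code used by the library.

    # Equivalent to Cohort._list_patient_ids, assuming `cohort` iterates over Patient objects.
    patient_ids = [patient.id for patient in cohort]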
hammerlab/cohorts
cohorts/cohort.py
Cohort.summarize_provenance_per_cache
def summarize_provenance_per_cache(self): """Utility function to summarize provenance files for cached items used by a Cohort, for each cache_dir that exists. Only existing cache_dirs are summarized. This is a summary of provenance files because the function checks to see whether all patients data have the same provenance within the cache dir. The function assumes that it will be desireable to have all patients data generated using the same environment, for each cache type. At the moment, most PROVENANCE files contain details about packages used to generat e the cached data file. However, this function is generic & so it summarizes the contents of those files irrespective of their contents. Returns ---------- Dict containing summarized provenance for each existing cache_dir, after checking to see that provenance files are identical among all patients in the data frame for that cache_dir. If conflicting PROVENANCE files are discovered within a cache-dir: - a warning is generated, describing the conflict - and, a value of `None` is returned in the dictionary for that cache-dir See also ----------- * `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among cache_dirs. * `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data frame for this cohort. """ provenance_summary = {} df = self.as_dataframe() for cache in self.cache_names: cache_name = self.cache_names[cache] cache_provenance = None num_discrepant = 0 this_cache_dir = path.join(self.cache_dir, cache_name) if path.exists(this_cache_dir): for patient_id in self._list_patient_ids(): patient_cache_dir = path.join(this_cache_dir, patient_id) try: this_provenance = self.load_provenance(patient_cache_dir = patient_cache_dir) except: this_provenance = None if this_provenance: if not(cache_provenance): cache_provenance = this_provenance else: num_discrepant += compare_provenance(this_provenance, cache_provenance) if num_discrepant == 0: provenance_summary[cache_name] = cache_provenance else: provenance_summary[cache_name] = None return(provenance_summary)
python
def summarize_provenance_per_cache(self): """Utility function to summarize provenance files for cached items used by a Cohort, for each cache_dir that exists. Only existing cache_dirs are summarized. This is a summary of provenance files because the function checks to see whether all patients data have the same provenance within the cache dir. The function assumes that it will be desireable to have all patients data generated using the same environment, for each cache type. At the moment, most PROVENANCE files contain details about packages used to generat e the cached data file. However, this function is generic & so it summarizes the contents of those files irrespective of their contents. Returns ---------- Dict containing summarized provenance for each existing cache_dir, after checking to see that provenance files are identical among all patients in the data frame for that cache_dir. If conflicting PROVENANCE files are discovered within a cache-dir: - a warning is generated, describing the conflict - and, a value of `None` is returned in the dictionary for that cache-dir See also ----------- * `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among cache_dirs. * `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data frame for this cohort. """ provenance_summary = {} df = self.as_dataframe() for cache in self.cache_names: cache_name = self.cache_names[cache] cache_provenance = None num_discrepant = 0 this_cache_dir = path.join(self.cache_dir, cache_name) if path.exists(this_cache_dir): for patient_id in self._list_patient_ids(): patient_cache_dir = path.join(this_cache_dir, patient_id) try: this_provenance = self.load_provenance(patient_cache_dir = patient_cache_dir) except: this_provenance = None if this_provenance: if not(cache_provenance): cache_provenance = this_provenance else: num_discrepant += compare_provenance(this_provenance, cache_provenance) if num_discrepant == 0: provenance_summary[cache_name] = cache_provenance else: provenance_summary[cache_name] = None return(provenance_summary)
[ "def", "summarize_provenance_per_cache", "(", "self", ")", ":", "provenance_summary", "=", "{", "}", "df", "=", "self", ".", "as_dataframe", "(", ")", "for", "cache", "in", "self", ".", "cache_names", ":", "cache_name", "=", "self", ".", "cache_names", "[", "cache", "]", "cache_provenance", "=", "None", "num_discrepant", "=", "0", "this_cache_dir", "=", "path", ".", "join", "(", "self", ".", "cache_dir", ",", "cache_name", ")", "if", "path", ".", "exists", "(", "this_cache_dir", ")", ":", "for", "patient_id", "in", "self", ".", "_list_patient_ids", "(", ")", ":", "patient_cache_dir", "=", "path", ".", "join", "(", "this_cache_dir", ",", "patient_id", ")", "try", ":", "this_provenance", "=", "self", ".", "load_provenance", "(", "patient_cache_dir", "=", "patient_cache_dir", ")", "except", ":", "this_provenance", "=", "None", "if", "this_provenance", ":", "if", "not", "(", "cache_provenance", ")", ":", "cache_provenance", "=", "this_provenance", "else", ":", "num_discrepant", "+=", "compare_provenance", "(", "this_provenance", ",", "cache_provenance", ")", "if", "num_discrepant", "==", "0", ":", "provenance_summary", "[", "cache_name", "]", "=", "cache_provenance", "else", ":", "provenance_summary", "[", "cache_name", "]", "=", "None", "return", "(", "provenance_summary", ")" ]
Utility function to summarize provenance files for cached items used by a Cohort, for each cache_dir that exists. Only existing cache_dirs are summarized. This is a summary of provenance files because the function checks to see whether all patients' data have the same provenance within the cache dir. The function assumes that it will be desirable to have all patients' data generated using the same environment, for each cache type. At the moment, most PROVENANCE files contain details about packages used to generate the cached data file. However, this function is generic & so it summarizes the contents of those files irrespective of their contents. Returns ---------- Dict containing summarized provenance for each existing cache_dir, after checking to see that provenance files are identical among all patients in the data frame for that cache_dir. If conflicting PROVENANCE files are discovered within a cache-dir: - a warning is generated, describing the conflict - and, a value of `None` is returned in the dictionary for that cache-dir See also ----------- * `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among cache_dirs. * `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data frame for this cohort.
[ "Utility", "function", "to", "summarize", "provenance", "files", "for", "cached", "items", "used", "by", "a", "Cohort", "for", "each", "cache_dir", "that", "exists", ".", "Only", "existing", "cache_dirs", "are", "summarized", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1418-L1471
hammerlab/cohorts
cohorts/cohort.py
Cohort.summarize_dataframe
def summarize_dataframe(self): """Summarize default dataframe for this cohort using a hash function. Useful for confirming the version of data used in various reports, e.g. ipynbs """ if self.dataframe_hash: return(self.dataframe_hash) else: df = self._as_dataframe_unmodified() return(self.dataframe_hash)
python
def summarize_dataframe(self): """Summarize default dataframe for this cohort using a hash function. Useful for confirming the version of data used in various reports, e.g. ipynbs """ if self.dataframe_hash: return(self.dataframe_hash) else: df = self._as_dataframe_unmodified() return(self.dataframe_hash)
[ "def", "summarize_dataframe", "(", "self", ")", ":", "if", "self", ".", "dataframe_hash", ":", "return", "(", "self", ".", "dataframe_hash", ")", "else", ":", "df", "=", "self", ".", "_as_dataframe_unmodified", "(", ")", "return", "(", "self", ".", "dataframe_hash", ")" ]
Summarize default dataframe for this cohort using a hash function. Useful for confirming the version of data used in various reports, e.g. ipynbs
[ "Summarize", "default", "dataframe", "for", "this", "cohort", "using", "a", "hash", "function", ".", "Useful", "for", "confirming", "the", "version", "of", "data", "used", "in", "various", "reports", "e", ".", "g", ".", "ipynbs" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1473-L1481
hammerlab/cohorts
cohorts/cohort.py
Cohort.summarize_provenance
def summarize_provenance(self): """Utility function to summarize provenance files for cached items used by a Cohort. At the moment, most PROVENANCE files contain details about packages used to generate files. However, this function is generic & so it summarizes the contents of those files irrespective of their contents. Returns ---------- Dict containing summary of provenance items, among all cache dirs used by the Cohort. IE if all provenances are identical across all cache dirs, then a single set of provenances is returned. Otherwise, if all provenances are not identical, the provenance items per cache_dir are returned. See also ---------- `?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance for each existing cache_dir. """ provenance_per_cache = self.summarize_provenance_per_cache() summary_provenance = None num_discrepant = 0 for cache in provenance_per_cache: if not(summary_provenance): ## pick arbitrary provenance & call this the "summary" (for now) summary_provenance = provenance_per_cache[cache] summary_provenance_name = cache ## for each cache, check equivalence with summary_provenance num_discrepant += compare_provenance( provenance_per_cache[cache], summary_provenance, left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name), right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache) ) ## compare provenance across cached items if num_discrepant == 0: prov = summary_provenance ## report summary provenance if exists else: prov = provenance_per_cache ## otherwise, return provenance per cache return(prov)
python
def summarize_provenance(self): """Utility function to summarize provenance files for cached items used by a Cohort. At the moment, most PROVENANCE files contain details about packages used to generate files. However, this function is generic & so it summarizes the contents of those files irrespective of their contents. Returns ---------- Dict containing summary of provenance items, among all cache dirs used by the Cohort. IE if all provenances are identical across all cache dirs, then a single set of provenances is returned. Otherwise, if all provenances are not identical, the provenance items per cache_dir are returned. See also ---------- `?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance for each existing cache_dir. """ provenance_per_cache = self.summarize_provenance_per_cache() summary_provenance = None num_discrepant = 0 for cache in provenance_per_cache: if not(summary_provenance): ## pick arbitrary provenance & call this the "summary" (for now) summary_provenance = provenance_per_cache[cache] summary_provenance_name = cache ## for each cache, check equivalence with summary_provenance num_discrepant += compare_provenance( provenance_per_cache[cache], summary_provenance, left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name), right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache) ) ## compare provenance across cached items if num_discrepant == 0: prov = summary_provenance ## report summary provenance if exists else: prov = provenance_per_cache ## otherwise, return provenance per cache return(prov)
[ "def", "summarize_provenance", "(", "self", ")", ":", "provenance_per_cache", "=", "self", ".", "summarize_provenance_per_cache", "(", ")", "summary_provenance", "=", "None", "num_discrepant", "=", "0", "for", "cache", "in", "provenance_per_cache", ":", "if", "not", "(", "summary_provenance", ")", ":", "## pick arbitrary provenance & call this the \"summary\" (for now)", "summary_provenance", "=", "provenance_per_cache", "[", "cache", "]", "summary_provenance_name", "=", "cache", "## for each cache, check equivalence with summary_provenance", "num_discrepant", "+=", "compare_provenance", "(", "provenance_per_cache", "[", "cache", "]", ",", "summary_provenance", ",", "left_outer_diff", "=", "\"In %s but not in %s\"", "%", "(", "cache", ",", "summary_provenance_name", ")", ",", "right_outer_diff", "=", "\"In %s but not in %s\"", "%", "(", "summary_provenance_name", ",", "cache", ")", ")", "## compare provenance across cached items", "if", "num_discrepant", "==", "0", ":", "prov", "=", "summary_provenance", "## report summary provenance if exists", "else", ":", "prov", "=", "provenance_per_cache", "## otherwise, return provenance per cache", "return", "(", "prov", ")" ]
Utility function to summarize provenance files for cached items used by a Cohort. At the moment, most PROVENANCE files contain details about packages used to generate files. However, this function is generic & so it summarizes the contents of those files irrespective of their contents. Returns ---------- Dict containing summary of provenance items, among all cache dirs used by the Cohort. IE if all provenances are identical across all cache dirs, then a single set of provenances is returned. Otherwise, if all provenances are not identical, the provenance items per cache_dir are returned. See also ---------- `?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance for each existing cache_dir.
[ "Utility", "function", "to", "summarize", "provenance", "files", "for", "cached", "items", "used", "by", "a", "Cohort", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1483-L1523
hammerlab/cohorts
cohorts/cohort.py
Cohort.summarize_data_sources
def summarize_data_sources(self): """Utility function to summarize data source status for this Cohort, useful for confirming the state of data used for an analysis Returns ---------- Dictionary with summary of data sources Currently contains - dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`) - provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`) """ provenance_file_summary = self.summarize_provenance() dataframe_hash = self.summarize_dataframe() results = { "provenance_file_summary": provenance_file_summary, "dataframe_hash": dataframe_hash } return(results)
python
def summarize_data_sources(self): """Utility function to summarize data source status for this Cohort, useful for confirming the state of data used for an analysis Returns ---------- Dictionary with summary of data sources Currently contains - dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`) - provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`) """ provenance_file_summary = self.summarize_provenance() dataframe_hash = self.summarize_dataframe() results = { "provenance_file_summary": provenance_file_summary, "dataframe_hash": dataframe_hash } return(results)
[ "def", "summarize_data_sources", "(", "self", ")", ":", "provenance_file_summary", "=", "self", ".", "summarize_provenance", "(", ")", "dataframe_hash", "=", "self", ".", "summarize_dataframe", "(", ")", "results", "=", "{", "\"provenance_file_summary\"", ":", "provenance_file_summary", ",", "\"dataframe_hash\"", ":", "dataframe_hash", "}", "return", "(", "results", ")" ]
Utility function to summarize data source status for this Cohort, useful for confirming the state of data used for an analysis Returns ---------- Dictionary with summary of data sources Currently contains - dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`) - provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`)
[ "Utility", "function", "to", "summarize", "data", "source", "status", "for", "this", "Cohort", "useful", "for", "confirming", "the", "state", "of", "data", "used", "for", "an", "analysis" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L1525-L1543
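A short usage sketch, assuming `cohort` is an existing cohorts.Cohort with a populated cache directory; the dictionary keys are the ones documented above.

    # Hypothetical call; handy at the top of an analysis notebook to record data state.
    summary = cohort.summarize_data_sources()
    print(summary["dataframe_hash"])            # hash of the default dataframe
    print(summary["provenance_file_summary"])   # merged or per-cache provenance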
hammerlab/cohorts
cohorts/variant_stats.py
strelka_somatic_variant_stats
def strelka_somatic_variant_stats(variant, variant_metadata): """Parse out the variant calling statistics for a given variant from a Strelka VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Strelka VCF Returns ------- SomaticVariantStats """ sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"]) normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"]) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
python
def strelka_somatic_variant_stats(variant, variant_metadata): """Parse out the variant calling statistics for a given variant from a Strelka VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Strelka VCF Returns ------- SomaticVariantStats """ sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"]) normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"]) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
[ "def", "strelka_somatic_variant_stats", "(", "variant", ",", "variant_metadata", ")", ":", "sample_info", "=", "variant_metadata", "[", "\"sample_info\"", "]", "# Ensure there are exactly two samples in the VCF, a tumor and normal", "assert", "len", "(", "sample_info", ")", "==", "2", ",", "\"More than two samples found in the somatic VCF\"", "tumor_stats", "=", "_strelka_variant_stats", "(", "variant", ",", "sample_info", "[", "\"TUMOR\"", "]", ")", "normal_stats", "=", "_strelka_variant_stats", "(", "variant", ",", "sample_info", "[", "\"NORMAL\"", "]", ")", "return", "SomaticVariantStats", "(", "tumor_stats", "=", "tumor_stats", ",", "normal_stats", "=", "normal_stats", ")" ]
Parse out the variant calling statistics for a given variant from a Strelka VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Strelka VCF Returns ------- SomaticVariantStats
[ "Parse", "out", "the", "variant", "calling", "statistics", "for", "a", "given", "variant", "from", "a", "Strelka", "VCF" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L24-L44
hammerlab/cohorts
cohorts/variant_stats.py
_strelka_variant_stats
def _strelka_variant_stats(variant, sample_info): """Parse a single sample"s variant calling statistics based on Strelka VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Strelka-specific variant calling fields Returns ------- VariantStats """ if variant.is_deletion or variant.is_insertion: # ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion) alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion) depth = ref_depth + alt_depth else: # Retrieve the Tier 1 counts from Strelka ref_depth = int(sample_info[variant.ref+"U"][0]) alt_depth = int(sample_info[variant.alt+"U"][0]) depth = alt_depth + ref_depth if depth > 0: vaf = float(alt_depth) / depth else: # unclear how to define vaf if no reads support variant # up to user to interpret this (hopefully filtered out in QC settings) vaf = None return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
python
def _strelka_variant_stats(variant, sample_info): """Parse a single sample"s variant calling statistics based on Strelka VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Strelka-specific variant calling fields Returns ------- VariantStats """ if variant.is_deletion or variant.is_insertion: # ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion) alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion) depth = ref_depth + alt_depth else: # Retrieve the Tier 1 counts from Strelka ref_depth = int(sample_info[variant.ref+"U"][0]) alt_depth = int(sample_info[variant.alt+"U"][0]) depth = alt_depth + ref_depth if depth > 0: vaf = float(alt_depth) / depth else: # unclear how to define vaf if no reads support variant # up to user to interpret this (hopefully filtered out in QC settings) vaf = None return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
[ "def", "_strelka_variant_stats", "(", "variant", ",", "sample_info", ")", ":", "if", "variant", ".", "is_deletion", "or", "variant", ".", "is_insertion", ":", "# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output", "ref_depth", "=", "int", "(", "sample_info", "[", "'TAR'", "]", "[", "0", "]", ")", "# number of reads supporting ref allele (non-deletion)", "alt_depth", "=", "int", "(", "sample_info", "[", "'TIR'", "]", "[", "0", "]", ")", "# number of reads supporting alt allele (deletion)", "depth", "=", "ref_depth", "+", "alt_depth", "else", ":", "# Retrieve the Tier 1 counts from Strelka", "ref_depth", "=", "int", "(", "sample_info", "[", "variant", ".", "ref", "+", "\"U\"", "]", "[", "0", "]", ")", "alt_depth", "=", "int", "(", "sample_info", "[", "variant", ".", "alt", "+", "\"U\"", "]", "[", "0", "]", ")", "depth", "=", "alt_depth", "+", "ref_depth", "if", "depth", ">", "0", ":", "vaf", "=", "float", "(", "alt_depth", ")", "/", "depth", "else", ":", "# unclear how to define vaf if no reads support variant", "# up to user to interpret this (hopefully filtered out in QC settings)", "vaf", "=", "None", "return", "VariantStats", "(", "depth", "=", "depth", ",", "alt_depth", "=", "alt_depth", ",", "variant_allele_frequency", "=", "vaf", ")" ]
Parse a single sample's variant calling statistics based on Strelka VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Strelka-specific variant calling fields Returns ------- VariantStats
[ "Parse", "a", "single", "sample", "s", "variant", "calling", "statistics", "based", "on", "Strelka", "VCF", "output" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L46-L77
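The SNV branch above reads tier-1 counts from the <allele>U fields and derives depth and VAF; this standalone snippet reproduces that arithmetic on made-up numbers (the sample_info values are assumptions, not real Strelka output).

    # Assumed tier-1 counts for a C>T SNV, mimicking Strelka's "CU"/"TU" fields.
    sample_info = {"CU": [48, 50], "TU": [12, 13]}
    ref_depth = int(sample_info["CU"][0])   # tier-1 reads supporting the ref allele
    alt_depth = int(sample_info["TU"][0])   # tier-1 reads supporting the alt allele
    depth = ref_depth + alt_depth
    vaf = float(alt_depth) / depth if depth > 0 else None
    print(depth, vaf)                       # 60 0.2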
hammerlab/cohorts
cohorts/variant_stats.py
mutect_somatic_variant_stats
def mutect_somatic_variant_stats(variant, variant_metadata): """Parse out the variant calling statistics for a given variant from a Mutect VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Mutect VCF Returns ------- SomaticVariantStats """ sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" # Find the sample with the genotype field set to variant in the VCF tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"] # Ensure there is only one such sample assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file" tumor_sample_info = tumor_sample_infos[0] normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0] tumor_stats = _mutect_variant_stats(variant, tumor_sample_info) normal_stats = _mutect_variant_stats(variant, normal_sample_info) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
python
def mutect_somatic_variant_stats(variant, variant_metadata): """Parse out the variant calling statistics for a given variant from a Mutect VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Mutect VCF Returns ------- SomaticVariantStats """ sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" # Find the sample with the genotype field set to variant in the VCF tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"] # Ensure there is only one such sample assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file" tumor_sample_info = tumor_sample_infos[0] normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0] tumor_stats = _mutect_variant_stats(variant, tumor_sample_info) normal_stats = _mutect_variant_stats(variant, normal_sample_info) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
[ "def", "mutect_somatic_variant_stats", "(", "variant", ",", "variant_metadata", ")", ":", "sample_info", "=", "variant_metadata", "[", "\"sample_info\"", "]", "# Ensure there are exactly two samples in the VCF, a tumor and normal", "assert", "len", "(", "sample_info", ")", "==", "2", ",", "\"More than two samples found in the somatic VCF\"", "# Find the sample with the genotype field set to variant in the VCF", "tumor_sample_infos", "=", "[", "info", "for", "info", "in", "sample_info", ".", "values", "(", ")", "if", "info", "[", "\"GT\"", "]", "==", "\"0/1\"", "]", "# Ensure there is only one such sample", "assert", "len", "(", "tumor_sample_infos", ")", "==", "1", ",", "\"More than one tumor sample found in the VCF file\"", "tumor_sample_info", "=", "tumor_sample_infos", "[", "0", "]", "normal_sample_info", "=", "[", "info", "for", "info", "in", "sample_info", ".", "values", "(", ")", "if", "info", "[", "\"GT\"", "]", "!=", "\"0/1\"", "]", "[", "0", "]", "tumor_stats", "=", "_mutect_variant_stats", "(", "variant", ",", "tumor_sample_info", ")", "normal_stats", "=", "_mutect_variant_stats", "(", "variant", ",", "normal_sample_info", ")", "return", "SomaticVariantStats", "(", "tumor_stats", "=", "tumor_stats", ",", "normal_stats", "=", "normal_stats", ")" ]
Parse out the variant calling statistics for a given variant from a Mutect VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Mutect VCF Returns ------- SomaticVariantStats
[ "Parse", "out", "the", "variant", "calling", "statistics", "for", "a", "given", "variant", "from", "a", "Mutect", "VCF" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L79-L109
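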
hammerlab/cohorts
cohorts/variant_stats.py
_mutect_variant_stats
def _mutect_variant_stats(variant, sample_info): """Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Mutect-specific variant calling fields Returns ------- VariantStats """ # Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH] ref_depth, alt_depth = sample_info["AD"] depth = int(ref_depth) + int(alt_depth) vaf = float(alt_depth) / depth return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
python
def _mutect_variant_stats(variant, sample_info): """Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Mutect-specific variant calling fields Returns ------- VariantStats """ # Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH] ref_depth, alt_depth = sample_info["AD"] depth = int(ref_depth) + int(alt_depth) vaf = float(alt_depth) / depth return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
[ "def", "_mutect_variant_stats", "(", "variant", ",", "sample_info", ")", ":", "# Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH]", "ref_depth", ",", "alt_depth", "=", "sample_info", "[", "\"AD\"", "]", "depth", "=", "int", "(", "ref_depth", ")", "+", "int", "(", "alt_depth", ")", "vaf", "=", "float", "(", "alt_depth", ")", "/", "depth", "return", "VariantStats", "(", "depth", "=", "depth", ",", "alt_depth", "=", "alt_depth", ",", "variant_allele_frequency", "=", "vaf", ")" ]
Parse a single sample's variant calling statistics based on Mutect's (v1) VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Mutect-specific variant calling fields Returns ------- VariantStats
[ "Parse", "a", "single", "sample", "s", "variant", "calling", "statistics", "based", "on", "Mutect", "s", "(", "v1", ")", "VCF", "output" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L111-L130
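The AD field holds [ref_depth, alt_depth]; this standalone snippet shows the same computation on an assumed genotype record (the numbers are illustrative, not real Mutect output).

    # Assumed Mutect-style sample record; AD = [ref reads, alt reads].
    sample_info = {"GT": "0/1", "AD": [70, 30]}
    ref_depth, alt_depth = sample_info["AD"]
    depth = int(ref_depth) + int(alt_depth)
    vaf = float(alt_depth) / depth
    print(depth, vaf)   # 100 0.3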
hammerlab/cohorts
cohorts/variant_stats.py
maf_somatic_variant_stats
def maf_somatic_variant_stats(variant, variant_metadata): """ Parse out the variant calling statistics for a given variant from a MAF file Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777 Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of metadata for this variant Returns ------- SomaticVariantStats """ tumor_stats = None normal_stats = None if "t_ref_count" in variant_metadata: tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t") if "n_ref_count" in variant_metadata: normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n") return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
python
def maf_somatic_variant_stats(variant, variant_metadata): """ Parse out the variant calling statistics for a given variant from a MAF file Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777 Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of metadata for this variant Returns ------- SomaticVariantStats """ tumor_stats = None normal_stats = None if "t_ref_count" in variant_metadata: tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t") if "n_ref_count" in variant_metadata: normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n") return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
[ "def", "maf_somatic_variant_stats", "(", "variant", ",", "variant_metadata", ")", ":", "tumor_stats", "=", "None", "normal_stats", "=", "None", "if", "\"t_ref_count\"", "in", "variant_metadata", ":", "tumor_stats", "=", "_maf_variant_stats", "(", "variant", ",", "variant_metadata", ",", "prefix", "=", "\"t\"", ")", "if", "\"n_ref_count\"", "in", "variant_metadata", ":", "normal_stats", "=", "_maf_variant_stats", "(", "variant", ",", "variant_metadata", ",", "prefix", "=", "\"n\"", ")", "return", "SomaticVariantStats", "(", "tumor_stats", "=", "tumor_stats", ",", "normal_stats", "=", "normal_stats", ")" ]
Parse out the variant calling statistics for a given variant from a MAF file Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777 Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of metadata for this variant Returns ------- SomaticVariantStats
[ "Parse", "out", "the", "variant", "calling", "statistics", "for", "a", "given", "variant", "from", "a", "MAF", "file" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L139-L161
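The MAF route keys off columns such as t_ref_count / n_ref_count; the exact fields consumed by _maf_variant_stats are not shown in this record, so the snippet below only illustrates the usual tumor-VAF arithmetic on assumed MAF-style counts.

    # Assumed MAF-style counts for one variant row; the *_alt_count keys are assumptions.
    variant_metadata = {"t_ref_count": 80, "t_alt_count": 20, "n_ref_count": 60, "n_alt_count": 0}
    t_depth = variant_metadata["t_ref_count"] + variant_metadata["t_alt_count"]
    t_vaf = variant_metadata["t_alt_count"] / t_depth
    print(t_depth, t_vaf)   # 100 0.2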
hammerlab/cohorts
cohorts/variant_stats.py
_vcf_is_strelka
def _vcf_is_strelka(variant_file, variant_metadata): """Return True if variant_file given is in strelka format """ if "strelka" in variant_file.lower(): return True elif "NORMAL" in variant_metadata["sample_info"].keys(): return True else: vcf_reader = vcf.Reader(open(variant_file, "r")) try: vcf_type = vcf_reader.metadata["content"] except KeyError: vcf_type = "" if "strelka" in vcf_type.lower(): return True return False
python
def _vcf_is_strelka(variant_file, variant_metadata): """Return True if variant_file given is in strelka format """ if "strelka" in variant_file.lower(): return True elif "NORMAL" in variant_metadata["sample_info"].keys(): return True else: vcf_reader = vcf.Reader(open(variant_file, "r")) try: vcf_type = vcf_reader.metadata["content"] except KeyError: vcf_type = "" if "strelka" in vcf_type.lower(): return True return False
[ "def", "_vcf_is_strelka", "(", "variant_file", ",", "variant_metadata", ")", ":", "if", "\"strelka\"", "in", "variant_file", ".", "lower", "(", ")", ":", "return", "True", "elif", "\"NORMAL\"", "in", "variant_metadata", "[", "\"sample_info\"", "]", ".", "keys", "(", ")", ":", "return", "True", "else", ":", "vcf_reader", "=", "vcf", ".", "Reader", "(", "open", "(", "variant_file", ",", "\"r\"", ")", ")", "try", ":", "vcf_type", "=", "vcf_reader", ".", "metadata", "[", "\"content\"", "]", "except", "KeyError", ":", "vcf_type", "=", "\"\"", "if", "\"strelka\"", "in", "vcf_type", ".", "lower", "(", ")", ":", "return", "True", "return", "False" ]
Return True if the given variant_file is in Strelka format
[ "Return", "True", "if", "variant_file", "given", "is", "in", "strelka", "format" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L163-L178
hammerlab/cohorts
cohorts/variant_stats.py
variant_stats_from_variant
def variant_stats_from_variant(variant, metadata, merge_fn=(lambda all_stats: \ max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))): """Parse the variant calling stats from a variant called from multiple variant files. The stats are merged based on `merge_fn` Parameters ---------- variant : varcode.Variant metadata : dict Dictionary of variant file to variant calling metadata from that file merge_fn : function Function from list of SomaticVariantStats to single SomaticVariantStats. This is used if a variant is called by multiple callers or appears in multiple VCFs. By default, this uses the data from the caller that had a higher tumor depth. Returns ------- SomaticVariantStats """ all_stats = [] for (variant_file, variant_metadata) in metadata.items(): if _vcf_is_maf(variant_file=variant_file): stats = maf_somatic_variant_stats(variant, variant_metadata) elif _vcf_is_strelka(variant_file=variant_file, variant_metadata=variant_metadata): stats = strelka_somatic_variant_stats(variant, variant_metadata) elif _vcf_is_mutect(variant_file=variant_file, variant_metadata=variant_metadata): stats = mutect_somatic_variant_stats(variant, variant_metadata) else: raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file)) all_stats.append(stats) return merge_fn(all_stats)
python
def variant_stats_from_variant(variant, metadata, merge_fn=(lambda all_stats: \ max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))): """Parse the variant calling stats from a variant called from multiple variant files. The stats are merged based on `merge_fn` Parameters ---------- variant : varcode.Variant metadata : dict Dictionary of variant file to variant calling metadata from that file merge_fn : function Function from list of SomaticVariantStats to single SomaticVariantStats. This is used if a variant is called by multiple callers or appears in multiple VCFs. By default, this uses the data from the caller that had a higher tumor depth. Returns ------- SomaticVariantStats """ all_stats = [] for (variant_file, variant_metadata) in metadata.items(): if _vcf_is_maf(variant_file=variant_file): stats = maf_somatic_variant_stats(variant, variant_metadata) elif _vcf_is_strelka(variant_file=variant_file, variant_metadata=variant_metadata): stats = strelka_somatic_variant_stats(variant, variant_metadata) elif _vcf_is_mutect(variant_file=variant_file, variant_metadata=variant_metadata): stats = mutect_somatic_variant_stats(variant, variant_metadata) else: raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file)) all_stats.append(stats) return merge_fn(all_stats)
[ "def", "variant_stats_from_variant", "(", "variant", ",", "metadata", ",", "merge_fn", "=", "(", "lambda", "all_stats", ":", "max", "(", "all_stats", ",", "key", "=", "(", "lambda", "stats", ":", "stats", ".", "tumor_stats", ".", "depth", ")", ")", ")", ")", ":", "all_stats", "=", "[", "]", "for", "(", "variant_file", ",", "variant_metadata", ")", "in", "metadata", ".", "items", "(", ")", ":", "if", "_vcf_is_maf", "(", "variant_file", "=", "variant_file", ")", ":", "stats", "=", "maf_somatic_variant_stats", "(", "variant", ",", "variant_metadata", ")", "elif", "_vcf_is_strelka", "(", "variant_file", "=", "variant_file", ",", "variant_metadata", "=", "variant_metadata", ")", ":", "stats", "=", "strelka_somatic_variant_stats", "(", "variant", ",", "variant_metadata", ")", "elif", "_vcf_is_mutect", "(", "variant_file", "=", "variant_file", ",", "variant_metadata", "=", "variant_metadata", ")", ":", "stats", "=", "mutect_somatic_variant_stats", "(", "variant", ",", "variant_metadata", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot parse sample fields, variant file {} is from an unsupported caller.\"", ".", "format", "(", "variant_file", ")", ")", "all_stats", ".", "append", "(", "stats", ")", "return", "merge_fn", "(", "all_stats", ")" ]
Parse the variant calling stats from a variant called from multiple variant files. The stats are merged based on `merge_fn` Parameters ---------- variant : varcode.Variant metadata : dict Dictionary of variant file to variant calling metadata from that file merge_fn : function Function from list of SomaticVariantStats to single SomaticVariantStats. This is used if a variant is called by multiple callers or appears in multiple VCFs. By default, this uses the data from the caller that had a higher tumor depth. Returns ------- SomaticVariantStats
[ "Parse", "the", "variant", "calling", "stats", "from", "a", "variant", "called", "from", "multiple", "variant", "files", ".", "The", "stats", "are", "merged", "based", "on", "merge_fn" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_stats.py#L202-L236
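Because merge_fn simply reduces a list of SomaticVariantStats to one, callers can swap in their own policy; this hedged sketch prefers the caller reporting the highest tumor VAF rather than the highest tumor depth (`variant` and `metadata` are assumed to come from a cohorts variant collection).

    # Hypothetical alternative merge policy; handles a possible None VAF.
    merge_by_vaf = lambda all_stats: max(
        all_stats,
        key=lambda stats: stats.tumor_stats.variant_allele_frequency or 0.0,
    )
    # stats = variant_stats_from_variant(variant, metadata, merge_fn=merge_by_vaf)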
alvarogzp/telegram-bot-framework
bot/multithreading/worker/pool/workers/limited_lifespan.py
LimitedLifespanQueueWorker._get_and_execute
def _get_and_execute(self): """ :return: True if it should continue running, False if it should end its execution. """ try: work = self.queue.get(timeout=self.max_seconds_idle) except queue.Empty: # max_seconds_idle has been exhausted, exiting self.end_notify() return False else: self._work(work) self.queue.task_done() return True
python
def _get_and_execute(self): """ :return: True if it should continue running, False if it should end its execution. """ try: work = self.queue.get(timeout=self.max_seconds_idle) except queue.Empty: # max_seconds_idle has been exhausted, exiting self.end_notify() return False else: self._work(work) self.queue.task_done() return True
[ "def", "_get_and_execute", "(", "self", ")", ":", "try", ":", "work", "=", "self", ".", "queue", ".", "get", "(", "timeout", "=", "self", ".", "max_seconds_idle", ")", "except", "queue", ".", "Empty", ":", "# max_seconds_idle has been exhausted, exiting", "self", ".", "end_notify", "(", ")", "return", "False", "else", ":", "self", ".", "_work", "(", "work", ")", "self", ".", "queue", ".", "task_done", "(", ")", "return", "True" ]
:return: True if it should continue running, False if it should end its execution.
[ ":", "return", ":", "True", "if", "it", "should", "continue", "running", "False", "if", "it", "should", "end", "its", "execution", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/worker/pool/workers/limited_lifespan.py#L21-L34
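The exit condition relies on Queue.get raising queue.Empty once the idle timeout expires; this standalone snippet demonstrates that mechanism outside the worker class (the 0.1-second timeout is an arbitrary assumption).

    import queue

    q = queue.Queue()
    try:
        work = q.get(timeout=0.1)   # blocks up to 0.1 s, then raises queue.Empty
    except queue.Empty:
        work = None                 # idle limit reached; the worker would notify and stop
    print(work)                     # None, since nothing was ever enqueued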
alvarogzp/telegram-bot-framework
bot/action/standard/info/formatter/chat.py
ChatInfoFormatter.format
def format(self, full_info: bool = False): """ :param full_info: If True, adds more info about the chat. Please, note that this additional info requires to make up to THREE synchronous api calls. """ chat = self.api_object if full_info: self.__format_full(chat) else: self.__format_simple(chat)
python
def format(self, full_info: bool = False): """ :param full_info: If True, adds more info about the chat. Please, note that this additional info requires to make up to THREE synchronous api calls. """ chat = self.api_object if full_info: self.__format_full(chat) else: self.__format_simple(chat)
[ "def", "format", "(", "self", ",", "full_info", ":", "bool", "=", "False", ")", ":", "chat", "=", "self", ".", "api_object", "if", "full_info", ":", "self", ".", "__format_full", "(", "chat", ")", "else", ":", "self", ".", "__format_simple", "(", "chat", ")" ]
:param full_info: If True, adds more info about the chat. Please, note that this additional info requires to make up to THREE synchronous api calls.
[ ":", "param", "full_info", ":", "If", "True", "adds", "more", "info", "about", "the", "chat", ".", "Please", "note", "that", "this", "additional", "info", "requires", "to", "make", "up", "to", "THREE", "synchronous", "api", "calls", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/chat.py#L20-L29
alvarogzp/telegram-bot-framework
bot/action/standard/chatsettings/__init__.py
ChatSettings.list
def list(self): """ :rtype: list(setting_name, value, default_value, is_set, is_supported) """ settings = [] for setting in _SETTINGS: value = self.get(setting) is_set = self.is_set(setting) default_value = self.get_default_value(setting) is_supported = True settings.append((setting, value, default_value, is_set, is_supported)) for setting in sorted(self.settings_state.list_keys()): if not self.is_supported(setting): value = self.get(setting) default_value = None is_set = True is_supported = False settings.append((setting, value, default_value, is_set, is_supported)) return settings
python
def list(self): """ :rtype: list(setting_name, value, default_value, is_set, is_supported) """ settings = [] for setting in _SETTINGS: value = self.get(setting) is_set = self.is_set(setting) default_value = self.get_default_value(setting) is_supported = True settings.append((setting, value, default_value, is_set, is_supported)) for setting in sorted(self.settings_state.list_keys()): if not self.is_supported(setting): value = self.get(setting) default_value = None is_set = True is_supported = False settings.append((setting, value, default_value, is_set, is_supported)) return settings
[ "def", "list", "(", "self", ")", ":", "settings", "=", "[", "]", "for", "setting", "in", "_SETTINGS", ":", "value", "=", "self", ".", "get", "(", "setting", ")", "is_set", "=", "self", ".", "is_set", "(", "setting", ")", "default_value", "=", "self", ".", "get_default_value", "(", "setting", ")", "is_supported", "=", "True", "settings", ".", "append", "(", "(", "setting", ",", "value", ",", "default_value", ",", "is_set", ",", "is_supported", ")", ")", "for", "setting", "in", "sorted", "(", "self", ".", "settings_state", ".", "list_keys", "(", ")", ")", ":", "if", "not", "self", ".", "is_supported", "(", "setting", ")", ":", "value", "=", "self", ".", "get", "(", "setting", ")", "default_value", "=", "None", "is_set", "=", "True", "is_supported", "=", "False", "settings", ".", "append", "(", "(", "setting", ",", "value", ",", "default_value", ",", "is_set", ",", "is_supported", ")", ")", "return", "settings" ]
:rtype: list(setting_name, value, default_value, is_set, is_supported)
[ ":", "rtype", ":", "list", "(", "setting_name", "value", "default_value", "is_set", "is_supported", ")" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/chatsettings/__init__.py#L42-L60
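A hedged consumption sketch: `chat_settings` is assumed to be an existing ChatSettings instance; the tuple layout is the one given in the docstring above.

    # Hypothetical iteration over the (name, value, default, is_set, is_supported) tuples.
    for name, value, default_value, is_set, is_supported in chat_settings.list():
        marker = "" if is_supported else " (unsupported)"
        print(name, value, "(default: %s)" % default_value, marker)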
hammerlab/cohorts
cohorts/variant_filters.py
load_ensembl_coverage
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0, pageant_dir_fn=None): """ Load in Pageant CoverageDepth results with Ensembl loci. coverage_path is a path to Pageant CoverageDepth output directory, with one subdirectory per patient and a `cdf.csv` file inside each patient subdir. If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate join tumor/normal coverage. pageant_dir_fn is a function that takes in a Patient and produces a Pageant dir name. Last tested with Pageant CoverageDepth version 1ca9ed2. """ # Function to grab the pageant file name using the Patient if pageant_dir_fn is None: pageant_dir_fn = lambda patient: patient.id columns_both = [ "depth1", # Normal "depth2", # Tumor "onBP1", "onBP2", "numOnLoci", "fracBPOn1", "fracBPOn2", "fracLociOn", "offBP1", "offBP2", "numOffLoci", "fracBPOff1", "fracBPOff2", "fracLociOff", ] columns_single = [ "depth", "onBP", "numOnLoci", "fracBPOn", "fracLociOn", "offBP", "numOffLoci", "fracBPOff", "fracLociOff" ] if min_normal_depth < 0: raise ValueError("min_normal_depth must be >= 0") use_tumor_only = (min_normal_depth == 0) columns = columns_single if use_tumor_only else columns_both ensembl_loci_dfs = [] for patient in cohort: patient_ensembl_loci_df = pd.read_csv( path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"), names=columns, header=1) # pylint: disable=no-member # pylint gets confused by read_csv if use_tumor_only: depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth) else: depth_mask = ( (patient_ensembl_loci_df.depth1 == min_normal_depth) & (patient_ensembl_loci_df.depth2 == min_tumor_depth)) patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask] assert len(patient_ensembl_loci_df) == 1, ( "Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format( min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient)) patient_ensembl_loci_df["patient_id"] = patient.id ensembl_loci_dfs.append(patient_ensembl_loci_df) ensembl_loci_df = pd.concat(ensembl_loci_dfs) ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0 return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]]
python
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0, pageant_dir_fn=None): """ Load in Pageant CoverageDepth results with Ensembl loci. coverage_path is a path to Pageant CoverageDepth output directory, with one subdirectory per patient and a `cdf.csv` file inside each patient subdir. If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate join tumor/normal coverage. pageant_dir_fn is a function that takes in a Patient and produces a Pageant dir name. Last tested with Pageant CoverageDepth version 1ca9ed2. """ # Function to grab the pageant file name using the Patient if pageant_dir_fn is None: pageant_dir_fn = lambda patient: patient.id columns_both = [ "depth1", # Normal "depth2", # Tumor "onBP1", "onBP2", "numOnLoci", "fracBPOn1", "fracBPOn2", "fracLociOn", "offBP1", "offBP2", "numOffLoci", "fracBPOff1", "fracBPOff2", "fracLociOff", ] columns_single = [ "depth", "onBP", "numOnLoci", "fracBPOn", "fracLociOn", "offBP", "numOffLoci", "fracBPOff", "fracLociOff" ] if min_normal_depth < 0: raise ValueError("min_normal_depth must be >= 0") use_tumor_only = (min_normal_depth == 0) columns = columns_single if use_tumor_only else columns_both ensembl_loci_dfs = [] for patient in cohort: patient_ensembl_loci_df = pd.read_csv( path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"), names=columns, header=1) # pylint: disable=no-member # pylint gets confused by read_csv if use_tumor_only: depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth) else: depth_mask = ( (patient_ensembl_loci_df.depth1 == min_normal_depth) & (patient_ensembl_loci_df.depth2 == min_tumor_depth)) patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask] assert len(patient_ensembl_loci_df) == 1, ( "Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format( min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient)) patient_ensembl_loci_df["patient_id"] = patient.id ensembl_loci_dfs.append(patient_ensembl_loci_df) ensembl_loci_df = pd.concat(ensembl_loci_dfs) ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0 return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]]
[ "def", "load_ensembl_coverage", "(", "cohort", ",", "coverage_path", ",", "min_tumor_depth", ",", "min_normal_depth", "=", "0", ",", "pageant_dir_fn", "=", "None", ")", ":", "# Function to grab the pageant file name using the Patient", "if", "pageant_dir_fn", "is", "None", ":", "pageant_dir_fn", "=", "lambda", "patient", ":", "patient", ".", "id", "columns_both", "=", "[", "\"depth1\"", ",", "# Normal", "\"depth2\"", ",", "# Tumor", "\"onBP1\"", ",", "\"onBP2\"", ",", "\"numOnLoci\"", ",", "\"fracBPOn1\"", ",", "\"fracBPOn2\"", ",", "\"fracLociOn\"", ",", "\"offBP1\"", ",", "\"offBP2\"", ",", "\"numOffLoci\"", ",", "\"fracBPOff1\"", ",", "\"fracBPOff2\"", ",", "\"fracLociOff\"", ",", "]", "columns_single", "=", "[", "\"depth\"", ",", "\"onBP\"", ",", "\"numOnLoci\"", ",", "\"fracBPOn\"", ",", "\"fracLociOn\"", ",", "\"offBP\"", ",", "\"numOffLoci\"", ",", "\"fracBPOff\"", ",", "\"fracLociOff\"", "]", "if", "min_normal_depth", "<", "0", ":", "raise", "ValueError", "(", "\"min_normal_depth must be >= 0\"", ")", "use_tumor_only", "=", "(", "min_normal_depth", "==", "0", ")", "columns", "=", "columns_single", "if", "use_tumor_only", "else", "columns_both", "ensembl_loci_dfs", "=", "[", "]", "for", "patient", "in", "cohort", ":", "patient_ensembl_loci_df", "=", "pd", ".", "read_csv", "(", "path", ".", "join", "(", "coverage_path", ",", "pageant_dir_fn", "(", "patient", ")", ",", "\"cdf.csv\"", ")", ",", "names", "=", "columns", ",", "header", "=", "1", ")", "# pylint: disable=no-member", "# pylint gets confused by read_csv", "if", "use_tumor_only", ":", "depth_mask", "=", "(", "patient_ensembl_loci_df", ".", "depth", "==", "min_tumor_depth", ")", "else", ":", "depth_mask", "=", "(", "(", "patient_ensembl_loci_df", ".", "depth1", "==", "min_normal_depth", ")", "&", "(", "patient_ensembl_loci_df", ".", "depth2", "==", "min_tumor_depth", ")", ")", "patient_ensembl_loci_df", "=", "patient_ensembl_loci_df", "[", "depth_mask", "]", "assert", "len", "(", "patient_ensembl_loci_df", ")", "==", "1", ",", "(", "\"Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}\"", ".", "format", "(", "min_tumor_depth", ",", "min_normal_depth", ",", "len", "(", "patient_ensembl_loci_df", ")", ",", "patient", ")", ")", "patient_ensembl_loci_df", "[", "\"patient_id\"", "]", "=", "patient", ".", "id", "ensembl_loci_dfs", ".", "append", "(", "patient_ensembl_loci_df", ")", "ensembl_loci_df", "=", "pd", ".", "concat", "(", "ensembl_loci_dfs", ")", "ensembl_loci_df", "[", "\"MB\"", "]", "=", "ensembl_loci_df", ".", "numOnLoci", "/", "1000000.0", "return", "ensembl_loci_df", "[", "[", "\"patient_id\"", ",", "\"numOnLoci\"", ",", "\"MB\"", "]", "]" ]
Load in Pageant CoverageDepth results with Ensembl loci. coverage_path is a path to Pageant CoverageDepth output directory, with one subdirectory per patient and a `cdf.csv` file inside each patient subdir. If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate join tumor/normal coverage. pageant_dir_fn is a function that takes in a Patient and produces a Pageant dir name. Last tested with Pageant CoverageDepth version 1ca9ed2.
[ "Load", "in", "Pageant", "CoverageDepth", "results", "with", "Ensembl", "loci", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/variant_filters.py#L87-L160
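A minimal usage sketch for the function in the record above. The cohort object, the Pageant output directory and the depth threshold are all invented for illustration, and the import location is assumed rather than taken from the record.

# Hypothetical setup: `cohort` iterates over Patient objects exposing an `.id`,
# and /data/pageant holds one subdirectory per patient with a cdf.csv inside.
coverage_df = load_ensembl_coverage(
    cohort=cohort,
    coverage_path="/data/pageant",
    min_tumor_depth=30,  # min_normal_depth defaults to 0, so this is tumor-only coverage
    pageant_dir_fn=lambda patient: "sample-%s" % patient.id,  # custom per-patient dir names
)
# One row per patient, with patient_id, numOnLoci and MB (numOnLoci / 1e6) columns.
print(coverage_df.head())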
hammerlab/cohorts
cohorts/plot.py
vertical_percent
def vertical_percent(plot, percent=0.1): """ Using the size of the y axis, return a fraction of that size. """ plot_bottom, plot_top = plot.get_ylim() return percent * (plot_top - plot_bottom)
python
def vertical_percent(plot, percent=0.1): """ Using the size of the y axis, return a fraction of that size. """ plot_bottom, plot_top = plot.get_ylim() return percent * (plot_top - plot_bottom)
[ "def", "vertical_percent", "(", "plot", ",", "percent", "=", "0.1", ")", ":", "plot_bottom", ",", "plot_top", "=", "plot", ".", "get_ylim", "(", ")", "return", "percent", "*", "(", "plot_top", "-", "plot_bottom", ")" ]
Using the size of the y axis, return a fraction of that size.
[ "Using", "the", "size", "of", "the", "y", "axis", "return", "a", "fraction", "of", "that", "size", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L24-L29
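A tiny illustration of the helper above; the axis limits are arbitrary.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_ylim(0, 50)
offset = vertical_percent(ax, 0.1)  # 10% of the 0-50 span, i.e. 5.0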
hammerlab/cohorts
cohorts/plot.py
hide_ticks
def hide_ticks(plot, min_tick_value=None, max_tick_value=None): """Hide tick values that are outside of [min_tick_value, max_tick_value]""" for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()): tick_label = as_numeric(tick_value) if tick_label: if (min_tick_value is not None and tick_label < min_tick_value or max_tick_value is not None and tick_label > max_tick_value): tick.set_visible(False)
python
def hide_ticks(plot, min_tick_value=None, max_tick_value=None): """Hide tick values that are outside of [min_tick_value, max_tick_value]""" for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()): tick_label = as_numeric(tick_value) if tick_label: if (min_tick_value is not None and tick_label < min_tick_value or max_tick_value is not None and tick_label > max_tick_value): tick.set_visible(False)
[ "def", "hide_ticks", "(", "plot", ",", "min_tick_value", "=", "None", ",", "max_tick_value", "=", "None", ")", ":", "for", "tick", ",", "tick_value", "in", "zip", "(", "plot", ".", "get_yticklabels", "(", ")", ",", "plot", ".", "get_yticks", "(", ")", ")", ":", "tick_label", "=", "as_numeric", "(", "tick_value", ")", "if", "tick_label", ":", "if", "(", "min_tick_value", "is", "not", "None", "and", "tick_label", "<", "min_tick_value", "or", "max_tick_value", "is", "not", "None", "and", "tick_label", ">", "max_tick_value", ")", ":", "tick", ".", "set_visible", "(", "False", ")" ]
Hide tick values that are outside of [min_tick_value, max_tick_value]
[ "Hide", "tick", "values", "that", "are", "outside", "of", "[", "min_tick_value", "max_tick_value", "]" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L37-L44
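An illustrative call, assuming matplotlib is installed and that the module-level as_numeric helper used inside hide_ticks is importable alongside it; the data and bounds are made up.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [-0.5, 0.4, 1.2, 2.6])
fig.canvas.draw()  # render once so the tick labels are populated
hide_ticks(ax, min_tick_value=0, max_tick_value=2)  # hide y tick labels outside [0, 2]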
hammerlab/cohorts
cohorts/plot.py
add_significance_indicator
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False): """ Add a p-value significance indicator. """ plot_bottom, plot_top = plot.get_ylim() # Give the plot a little room for the significance indicator line_height = vertical_percent(plot, 0.1) # Add some extra spacing below the indicator plot_top = plot_top + line_height # Add some extra spacing above the indicator plot.set_ylim(top=plot_top + line_height * 2) color = "black" line_top = plot_top + line_height plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color) indicator = "*" if significant else "ns" plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
python
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False): """ Add a p-value significance indicator. """ plot_bottom, plot_top = plot.get_ylim() # Give the plot a little room for the significance indicator line_height = vertical_percent(plot, 0.1) # Add some extra spacing below the indicator plot_top = plot_top + line_height # Add some extra spacing above the indicator plot.set_ylim(top=plot_top + line_height * 2) color = "black" line_top = plot_top + line_height plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color) indicator = "*" if significant else "ns" plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
[ "def", "add_significance_indicator", "(", "plot", ",", "col_a", "=", "0", ",", "col_b", "=", "1", ",", "significant", "=", "False", ")", ":", "plot_bottom", ",", "plot_top", "=", "plot", ".", "get_ylim", "(", ")", "# Give the plot a little room for the significance indicator", "line_height", "=", "vertical_percent", "(", "plot", ",", "0.1", ")", "# Add some extra spacing below the indicator", "plot_top", "=", "plot_top", "+", "line_height", "# Add some extra spacing above the indicator", "plot", ".", "set_ylim", "(", "top", "=", "plot_top", "+", "line_height", "*", "2", ")", "color", "=", "\"black\"", "line_top", "=", "plot_top", "+", "line_height", "plot", ".", "plot", "(", "[", "col_a", ",", "col_a", ",", "col_b", ",", "col_b", "]", ",", "[", "plot_top", ",", "line_top", ",", "line_top", ",", "plot_top", "]", ",", "lw", "=", "1.5", ",", "color", "=", "color", ")", "indicator", "=", "\"*\"", "if", "significant", "else", "\"ns\"", "plot", ".", "text", "(", "(", "col_a", "+", "col_b", ")", "*", "0.5", ",", "line_top", ",", "indicator", ",", "ha", "=", "\"center\"", ",", "va", "=", "\"bottom\"", ",", "color", "=", "color", ")" ]
Add a p-value significance indicator.
[ "Add", "a", "p", "-", "value", "significance", "indicator", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L55-L70
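A sketch of how the indicator might be layered onto a two-group seaborn plot; the example data and the significance flag are invented.

import pandas as pd
import seaborn as sb

df = pd.DataFrame({"group": ["A"] * 4 + ["B"] * 4,
                   "value": [1.0, 1.2, 0.9, 1.1, 2.0, 2.3, 1.8, 2.2]})
ax = sb.boxplot(x="group", y="value", data=df)
# Draw the bracket between column 0 ("A") and column 1 ("B") and label it "*".
add_significance_indicator(ax, col_a=0, col_b=1, significant=True)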
hammerlab/cohorts
cohorts/plot.py
stripboxplot
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs): """ Overlay a stripplot on top of a boxplot. """ ax = sb.boxplot( x=x, y=y, data=data, ax=ax, fliersize=0, **kwargs ) plot = sb.stripplot( x=x, y=y, data=data, ax=ax, jitter=kwargs.pop("jitter", 0.05), color=kwargs.pop("color", "0.3"), **kwargs ) if data[y].min() >= 0: hide_negative_y_ticks(plot) if significant is not None: add_significance_indicator(plot=plot, significant=significant) return plot
python
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs): """ Overlay a stripplot on top of a boxplot. """ ax = sb.boxplot( x=x, y=y, data=data, ax=ax, fliersize=0, **kwargs ) plot = sb.stripplot( x=x, y=y, data=data, ax=ax, jitter=kwargs.pop("jitter", 0.05), color=kwargs.pop("color", "0.3"), **kwargs ) if data[y].min() >= 0: hide_negative_y_ticks(plot) if significant is not None: add_significance_indicator(plot=plot, significant=significant) return plot
[ "def", "stripboxplot", "(", "x", ",", "y", ",", "data", ",", "ax", "=", "None", ",", "significant", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ax", "=", "sb", ".", "boxplot", "(", "x", "=", "x", ",", "y", "=", "y", ",", "data", "=", "data", ",", "ax", "=", "ax", ",", "fliersize", "=", "0", ",", "*", "*", "kwargs", ")", "plot", "=", "sb", ".", "stripplot", "(", "x", "=", "x", ",", "y", "=", "y", ",", "data", "=", "data", ",", "ax", "=", "ax", ",", "jitter", "=", "kwargs", ".", "pop", "(", "\"jitter\"", ",", "0.05", ")", ",", "color", "=", "kwargs", ".", "pop", "(", "\"color\"", ",", "\"0.3\"", ")", ",", "*", "*", "kwargs", ")", "if", "data", "[", "y", "]", ".", "min", "(", ")", ">=", "0", ":", "hide_negative_y_ticks", "(", "plot", ")", "if", "significant", "is", "not", "None", ":", "add_significance_indicator", "(", "plot", "=", "plot", ",", "significant", "=", "significant", ")", "return", "plot" ]
Overlay a stripplot on top of a boxplot.
[ "Overlay", "a", "stripplot", "on", "top", "of", "a", "boxplot", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L72-L100
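A usage sketch with toy data; it assumes the seaborn/matplotlib stack plus the module's own hide_negative_y_ticks helper are available alongside stripboxplot.

import pandas as pd

df = pd.DataFrame({"benefit": [True, True, True, False, False, False],
                   "neoantigen_count": [12.0, 30.0, 25.0, 3.0, 8.0, 5.0]})
# Box plot with the individual points jittered on top; significant=False adds an "ns" bracket.
plot = stripboxplot(x="benefit", y="neoantigen_count", data=df, significant=False)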
hammerlab/cohorts
cohorts/plot.py
fishers_exact_plot
def fishers_exact_plot(data, condition1, condition2, ax=None, condition1_value=None, alternative="two-sided", **kwargs): """ Perform a Fisher's exact test to compare to binary columns Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition1: str First binary column to compare (and used for test sidedness) condition2: str Second binary column to compare ax : Axes, default None Axes to plot on condition1_value: If `condition1` is not a binary column, split on =/!= to condition1_value alternative: Specify the sidedness of the test: "two-sided", "less" or "greater" """ plot = sb.barplot( x=condition1, y=condition2, ax=ax, data=data, **kwargs ) plot.set_ylabel("Percent %s" % condition2) condition1_mask = get_condition_mask(data, condition1, condition1_value) count_table = pd.crosstab(data[condition1], data[condition2]) print(count_table) oddsratio, p_value = fisher_exact(count_table, alternative=alternative) add_significance_indicator(plot=plot, significant=p_value <= 0.05) only_percentage_ticks(plot) if alternative != "two-sided": raise ValueError("We need to better understand the one-sided Fisher's Exact test") sided_str = "two-sided" print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str)) return FishersExactResults(oddsratio=oddsratio, p_value=p_value, sided_str=sided_str, with_condition1_series=data[condition1_mask][condition2], without_condition1_series=data[~condition1_mask][condition2], plot=plot)
python
def fishers_exact_plot(data, condition1, condition2, ax=None, condition1_value=None, alternative="two-sided", **kwargs): """ Perform a Fisher's exact test to compare to binary columns Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition1: str First binary column to compare (and used for test sidedness) condition2: str Second binary column to compare ax : Axes, default None Axes to plot on condition1_value: If `condition1` is not a binary column, split on =/!= to condition1_value alternative: Specify the sidedness of the test: "two-sided", "less" or "greater" """ plot = sb.barplot( x=condition1, y=condition2, ax=ax, data=data, **kwargs ) plot.set_ylabel("Percent %s" % condition2) condition1_mask = get_condition_mask(data, condition1, condition1_value) count_table = pd.crosstab(data[condition1], data[condition2]) print(count_table) oddsratio, p_value = fisher_exact(count_table, alternative=alternative) add_significance_indicator(plot=plot, significant=p_value <= 0.05) only_percentage_ticks(plot) if alternative != "two-sided": raise ValueError("We need to better understand the one-sided Fisher's Exact test") sided_str = "two-sided" print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str)) return FishersExactResults(oddsratio=oddsratio, p_value=p_value, sided_str=sided_str, with_condition1_series=data[condition1_mask][condition2], without_condition1_series=data[~condition1_mask][condition2], plot=plot)
[ "def", "fishers_exact_plot", "(", "data", ",", "condition1", ",", "condition2", ",", "ax", "=", "None", ",", "condition1_value", "=", "None", ",", "alternative", "=", "\"two-sided\"", ",", "*", "*", "kwargs", ")", ":", "plot", "=", "sb", ".", "barplot", "(", "x", "=", "condition1", ",", "y", "=", "condition2", ",", "ax", "=", "ax", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")", "plot", ".", "set_ylabel", "(", "\"Percent %s\"", "%", "condition2", ")", "condition1_mask", "=", "get_condition_mask", "(", "data", ",", "condition1", ",", "condition1_value", ")", "count_table", "=", "pd", ".", "crosstab", "(", "data", "[", "condition1", "]", ",", "data", "[", "condition2", "]", ")", "print", "(", "count_table", ")", "oddsratio", ",", "p_value", "=", "fisher_exact", "(", "count_table", ",", "alternative", "=", "alternative", ")", "add_significance_indicator", "(", "plot", "=", "plot", ",", "significant", "=", "p_value", "<=", "0.05", ")", "only_percentage_ticks", "(", "plot", ")", "if", "alternative", "!=", "\"two-sided\"", ":", "raise", "ValueError", "(", "\"We need to better understand the one-sided Fisher's Exact test\"", ")", "sided_str", "=", "\"two-sided\"", "print", "(", "\"Fisher's Exact Test: OR: {}, p-value={} ({})\"", ".", "format", "(", "oddsratio", ",", "p_value", ",", "sided_str", ")", ")", "return", "FishersExactResults", "(", "oddsratio", "=", "oddsratio", ",", "p_value", "=", "p_value", ",", "sided_str", "=", "sided_str", ",", "with_condition1_series", "=", "data", "[", "condition1_mask", "]", "[", "condition2", "]", ",", "without_condition1_series", "=", "data", "[", "~", "condition1_mask", "]", "[", "condition2", "]", ",", "plot", "=", "plot", ")" ]
Perform a Fisher's exact test to compare to binary columns Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition1: str First binary column to compare (and used for test sidedness) condition2: str Second binary column to compare ax : Axes, default None Axes to plot on condition1_value: If `condition1` is not a binary column, split on =/!= to condition1_value alternative: Specify the sidedness of the test: "two-sided", "less" or "greater"
[ "Perform", "a", "Fisher", "s", "exact", "test", "to", "compare", "to", "binary", "columns" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L130-L182
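A worked call on invented boolean columns; the result fields read below come from the FishersExactResults object constructed at the end of the function.

import pandas as pd

df = pd.DataFrame({"mutation_present": [True, True, True, True, False, False, False, False],
                   "benefit":          [True, True, True, False, False, False, False, True]})
# Two-sided Fisher's exact test on the 2x2 crosstab of the two columns, plus a bar plot.
results = fishers_exact_plot(df, condition1="mutation_present", condition2="benefit")
print(results.oddsratio, results.p_value)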
hammerlab/cohorts
cohorts/plot.py
mann_whitney_plot
def mann_whitney_plot(data, condition, distribution, ax=None, condition_value=None, alternative="two-sided", skip_plot=False, **kwargs): """ Create a box plot comparing a condition and perform a Mann Whitney test to compare the distribution in condition A v B Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition: str Column to use as the splitting criteria distribution: str Column to use as the Y-axis or distribution in the test ax : Axes, default None Axes to plot on condition_value: If `condition` is not a binary column, split on =/!= to condition_value alternative: Specify the sidedness of the Mann-Whitney test: "two-sided", "less" or "greater" skip_plot: Calculate the test statistic and p-value, but don't plot. """ condition_mask = get_condition_mask(data, condition, condition_value) U, p_value = mannwhitneyu( data[condition_mask][distribution], data[~condition_mask][distribution], alternative=alternative ) plot = None if not skip_plot: plot = stripboxplot( x=condition, y=distribution, data=data, ax=ax, significant=p_value <= 0.05, **kwargs ) sided_str = sided_str_from_alternative(alternative, condition) print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str)) return MannWhitneyResults(U=U, p_value=p_value, sided_str=sided_str, with_condition_series=data[condition_mask][distribution], without_condition_series=data[~condition_mask][distribution], plot=plot)
python
def mann_whitney_plot(data, condition, distribution, ax=None, condition_value=None, alternative="two-sided", skip_plot=False, **kwargs): """ Create a box plot comparing a condition and perform a Mann Whitney test to compare the distribution in condition A v B Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition: str Column to use as the splitting criteria distribution: str Column to use as the Y-axis or distribution in the test ax : Axes, default None Axes to plot on condition_value: If `condition` is not a binary column, split on =/!= to condition_value alternative: Specify the sidedness of the Mann-Whitney test: "two-sided", "less" or "greater" skip_plot: Calculate the test statistic and p-value, but don't plot. """ condition_mask = get_condition_mask(data, condition, condition_value) U, p_value = mannwhitneyu( data[condition_mask][distribution], data[~condition_mask][distribution], alternative=alternative ) plot = None if not skip_plot: plot = stripboxplot( x=condition, y=distribution, data=data, ax=ax, significant=p_value <= 0.05, **kwargs ) sided_str = sided_str_from_alternative(alternative, condition) print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str)) return MannWhitneyResults(U=U, p_value=p_value, sided_str=sided_str, with_condition_series=data[condition_mask][distribution], without_condition_series=data[~condition_mask][distribution], plot=plot)
[ "def", "mann_whitney_plot", "(", "data", ",", "condition", ",", "distribution", ",", "ax", "=", "None", ",", "condition_value", "=", "None", ",", "alternative", "=", "\"two-sided\"", ",", "skip_plot", "=", "False", ",", "*", "*", "kwargs", ")", ":", "condition_mask", "=", "get_condition_mask", "(", "data", ",", "condition", ",", "condition_value", ")", "U", ",", "p_value", "=", "mannwhitneyu", "(", "data", "[", "condition_mask", "]", "[", "distribution", "]", ",", "data", "[", "~", "condition_mask", "]", "[", "distribution", "]", ",", "alternative", "=", "alternative", ")", "plot", "=", "None", "if", "not", "skip_plot", ":", "plot", "=", "stripboxplot", "(", "x", "=", "condition", ",", "y", "=", "distribution", ",", "data", "=", "data", ",", "ax", "=", "ax", ",", "significant", "=", "p_value", "<=", "0.05", ",", "*", "*", "kwargs", ")", "sided_str", "=", "sided_str_from_alternative", "(", "alternative", ",", "condition", ")", "print", "(", "\"Mann-Whitney test: U={}, p-value={} ({})\"", ".", "format", "(", "U", ",", "p_value", ",", "sided_str", ")", ")", "return", "MannWhitneyResults", "(", "U", "=", "U", ",", "p_value", "=", "p_value", ",", "sided_str", "=", "sided_str", ",", "with_condition_series", "=", "data", "[", "condition_mask", "]", "[", "distribution", "]", ",", "without_condition_series", "=", "data", "[", "~", "condition_mask", "]", "[", "distribution", "]", ",", "plot", "=", "plot", ")" ]
Create a box plot comparing a condition and perform a Mann Whitney test to compare the distribution in condition A v B Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition: str Column to use as the splitting criteria distribution: str Column to use as the Y-axis or distribution in the test ax : Axes, default None Axes to plot on condition_value: If `condition` is not a binary column, split on =/!= to condition_value alternative: Specify the sidedness of the Mann-Whitney test: "two-sided", "less" or "greater" skip_plot: Calculate the test statistic and p-value, but don't plot.
[ "Create", "a", "box", "plot", "comparing", "a", "condition", "and", "perform", "a", "Mann", "Whitney", "test", "to", "compare", "the", "distribution", "in", "condition", "A", "v", "B" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L192-L253
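An illustrative call with made-up data; note the alternative keyword relies on a scipy version whose mannwhitneyu accepts it.

import pandas as pd

df = pd.DataFrame({"benefit": [True, True, True, False, False, False],
                   "mutation_count": [220.0, 180.0, 140.0, 60.0, 55.0, 40.0]})
# Two-sided Mann-Whitney U test of mutation_count split by benefit, with the strip/box plot.
results = mann_whitney_plot(df, condition="benefit", distribution="mutation_count")
print(results.U, results.p_value)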
hammerlab/cohorts
cohorts/plot.py
roc_curve_plot
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None): """Create a ROC curve and compute the bootstrap AUC for the given variable and outcome Parameters ---------- data : Pandas dataframe Dataframe to retrieve information from value_column : str Column to retrieve the values from outcome_column : str Column to use as the outcome variable bootstrap_samples : int, optional Number of bootstrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_bootstrap_auc, roc_plot) : (float, matplotlib plot) Mean AUC for the given number of bootstrap samples and the plot """ scores = bootstrap_auc(df=data, col=value_column, pred_col=outcome_column, n_bootstrap=bootstrap_samples) mean_bootstrap_auc = scores.mean() print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format( value_column, bootstrap_samples, mean_bootstrap_auc, scores.std())) outcome = data[outcome_column].astype(int) values = data[value_column] fpr, tpr, thresholds = roc_curve(outcome, values) if ax is None: ax = plt.gca() roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column) ax.set_xlim([-0.05, 1.05]) ax.set_ylim([-0.05, 1.05]) ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive Rate') ax.legend(loc=2, borderaxespad=0.) ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values))) return (mean_bootstrap_auc, roc_plot)
python
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None): """Create a ROC curve and compute the bootstrap AUC for the given variable and outcome Parameters ---------- data : Pandas dataframe Dataframe to retrieve information from value_column : str Column to retrieve the values from outcome_column : str Column to use as the outcome variable bootstrap_samples : int, optional Number of bootstrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_bootstrap_auc, roc_plot) : (float, matplotlib plot) Mean AUC for the given number of bootstrap samples and the plot """ scores = bootstrap_auc(df=data, col=value_column, pred_col=outcome_column, n_bootstrap=bootstrap_samples) mean_bootstrap_auc = scores.mean() print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format( value_column, bootstrap_samples, mean_bootstrap_auc, scores.std())) outcome = data[outcome_column].astype(int) values = data[value_column] fpr, tpr, thresholds = roc_curve(outcome, values) if ax is None: ax = plt.gca() roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column) ax.set_xlim([-0.05, 1.05]) ax.set_ylim([-0.05, 1.05]) ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive Rate') ax.legend(loc=2, borderaxespad=0.) ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values))) return (mean_bootstrap_auc, roc_plot)
[ "def", "roc_curve_plot", "(", "data", ",", "value_column", ",", "outcome_column", ",", "bootstrap_samples", "=", "100", ",", "ax", "=", "None", ")", ":", "scores", "=", "bootstrap_auc", "(", "df", "=", "data", ",", "col", "=", "value_column", ",", "pred_col", "=", "outcome_column", ",", "n_bootstrap", "=", "bootstrap_samples", ")", "mean_bootstrap_auc", "=", "scores", ".", "mean", "(", ")", "print", "(", "\"{}, Bootstrap (samples = {}) AUC:{}, std={}\"", ".", "format", "(", "value_column", ",", "bootstrap_samples", ",", "mean_bootstrap_auc", ",", "scores", ".", "std", "(", ")", ")", ")", "outcome", "=", "data", "[", "outcome_column", "]", ".", "astype", "(", "int", ")", "values", "=", "data", "[", "value_column", "]", "fpr", ",", "tpr", ",", "thresholds", "=", "roc_curve", "(", "outcome", ",", "values", ")", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "roc_plot", "=", "ax", ".", "plot", "(", "fpr", ",", "tpr", ",", "lw", "=", "1", ",", "label", "=", "value_column", ")", "ax", ".", "set_xlim", "(", "[", "-", "0.05", ",", "1.05", "]", ")", "ax", ".", "set_ylim", "(", "[", "-", "0.05", ",", "1.05", "]", ")", "ax", ".", "set_xlabel", "(", "'False Positive Rate'", ")", "ax", ".", "set_ylabel", "(", "'True Positive Rate'", ")", "ax", ".", "legend", "(", "loc", "=", "2", ",", "borderaxespad", "=", "0.", ")", "ax", ".", "set_title", "(", "'{} ROC Curve (n={})'", ".", "format", "(", "value_column", ",", "len", "(", "values", ")", ")", ")", "return", "(", "mean_bootstrap_auc", ",", "roc_plot", ")" ]
Create a ROC curve and compute the bootstrap AUC for the given variable and outcome Parameters ---------- data : Pandas dataframe Dataframe to retrieve information from value_column : str Column to retrieve the values from outcome_column : str Column to use as the outcome variable bootstrap_samples : int, optional Number of bootstrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_bootstrap_auc, roc_plot) : (float, matplotlib plot) Mean AUC for the given number of bootstrap samples and the plot
[ "Create", "a", "ROC", "curve", "and", "compute", "the", "bootstrap", "AUC", "for", "the", "given", "variable", "and", "outcome" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L263-L308
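A small sketch; the column names and the low bootstrap count are arbitrary, and the bootstrap_auc helper called inside is assumed importable from the same module.

import pandas as pd

df = pd.DataFrame({"tmb": [5.0, 30.0, 12.0, 45.0, 3.0, 28.0],
                   "benefit": [False, True, False, True, False, True]})
mean_auc, roc_plot = roc_curve_plot(df, value_column="tmb", outcome_column="benefit",
                                    bootstrap_samples=20)  # few resamples, just for the sketch
print(mean_auc)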
hammerlab/cohorts
cohorts/utils.py
get_cache_dir
def get_cache_dir(cache_dir, cache_root_dir=None, *args, **kwargs): """ Return full cache_dir, according to following logic: - if cache_dir is a full path (per path.isabs), return that value - if not and if cache_root_dir is not None, join two paths - otherwise, log warnings and return None Separately, if args or kwargs are given, format cache_dir using kwargs """ cache_dir = cache_dir.format(*args, **kwargs) if path.isabs(cache_dir): if cache_root_dir is not None: logger.warning('cache_dir ({}) is a full path; ignoring cache_root_dir'.format(cache_dir)) return cache_dir if cache_root_dir is not None: return path.join(cache_root_dir, cache_dir) else: logger.warning("cache dir is not full path & cache_root_dir not given. Caching may not work as expected!") return None
python
def get_cache_dir(cache_dir, cache_root_dir=None, *args, **kwargs): """ Return full cache_dir, according to following logic: - if cache_dir is a full path (per path.isabs), return that value - if not and if cache_root_dir is not None, join two paths - otherwise, log warnings and return None Separately, if args or kwargs are given, format cache_dir using kwargs """ cache_dir = cache_dir.format(*args, **kwargs) if path.isabs(cache_dir): if cache_root_dir is not None: logger.warning('cache_dir ({}) is a full path; ignoring cache_root_dir'.format(cache_dir)) return cache_dir if cache_root_dir is not None: return path.join(cache_root_dir, cache_dir) else: logger.warning("cache dir is not full path & cache_root_dir not given. Caching may not work as expected!") return None
[ "def", "get_cache_dir", "(", "cache_dir", ",", "cache_root_dir", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cache_dir", "=", "cache_dir", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "path", ".", "isabs", "(", "cache_dir", ")", ":", "if", "cache_root_dir", "is", "not", "None", ":", "logger", ".", "warning", "(", "'cache_dir ({}) is a full path; ignoring cache_root_dir'", ".", "format", "(", "cache_dir", ")", ")", "return", "cache_dir", "if", "cache_root_dir", "is", "not", "None", ":", "return", "path", ".", "join", "(", "cache_root_dir", ",", "cache_dir", ")", "else", ":", "logger", ".", "warning", "(", "\"cache dir is not full path & cache_root_dir not given. Caching may not work as expected!\"", ")", "return", "None" ]
Return full cache_dir, according to following logic: - if cache_dir is a full path (per path.isabs), return that value - if not and if cache_root_dir is not None, join two paths - otherwise, log warnings and return None Separately, if args or kwargs are given, format cache_dir using kwargs
[ "Return", "full", "cache_dir", "according", "to", "following", "logic", ":", "-", "if", "cache_dir", "is", "a", "full", "path", "(", "per", "path", ".", "isabs", ")", "return", "that", "value", "-", "if", "not", "and", "if", "cache_root_dir", "is", "not", "None", "join", "two", "paths", "-", "otherwise", "log", "warnings", "and", "return", "None", "Separately", "if", "args", "or", "kwargs", "are", "given", "format", "cache_dir", "using", "kwargs" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L24-L41
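Worked calls covering the three branches described in the docstring; all paths and template values are invented.

# Relative cache_dir is joined onto cache_root_dir:
get_cache_dir("cached-mutations", cache_root_dir="/mnt/cache")   # -> "/mnt/cache/cached-mutations"

# An absolute cache_dir wins; cache_root_dir is ignored with a warning:
get_cache_dir("/tmp/cohort-cache", cache_root_dir="/mnt/cache")  # -> "/tmp/cohort-cache"

# Keyword arguments are substituted into the cache_dir template before the path logic runs:
get_cache_dir("cached-{provider}", cache_root_dir="/mnt/cache", provider="mutect")
# -> "/mnt/cache/cached-mutect"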
hammerlab/cohorts
cohorts/utils.py
_strip_column_name
def _strip_column_name(col_name, keep_paren_contents=True): """ Utility script applying several regexs to a string. Intended to be used by `strip_column_names`. This function will: 1. replace informative punctuation components with text 2. (optionally) remove text within parentheses 3. replace remaining punctuation/whitespace with _ 4. strip leading/trailing punctuation/whitespace Parameters ---------- col_name (str): input character string keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns -------- modified string for new field name Examples -------- > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']]) """ # start with input new_col_name = col_name # replace meaningful punctuation with text equivalents # surround each with whitespace to enforce consistent use of _ punctuation_to_text = { '<=': 'le', '>=': 'ge', '=<': 'le', '=>': 'ge', '<': 'lt', '>': 'gt', '#': 'num' } for punctuation, punctuation_text in punctuation_to_text.items(): new_col_name = new_col_name.replace(punctuation, punctuation_text) # remove contents within () if not(keep_paren_contents): new_col_name = re.sub('\([^)]*\)', '', new_col_name) # replace remaining punctuation/whitespace with _ punct_pattern = '[\W_]+' punct_replacement = '_' new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name) # remove leading/trailing _ if it exists (if last char was punctuation) new_col_name = new_col_name.strip("_") # TODO: check for empty string # return lower-case version of column name return new_col_name.lower()
python
def _strip_column_name(col_name, keep_paren_contents=True): """ Utility script applying several regexs to a string. Intended to be used by `strip_column_names`. This function will: 1. replace informative punctuation components with text 2. (optionally) remove text within parentheses 3. replace remaining punctuation/whitespace with _ 4. strip leading/trailing punctuation/whitespace Parameters ---------- col_name (str): input character string keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns -------- modified string for new field name Examples -------- > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']]) """ # start with input new_col_name = col_name # replace meaningful punctuation with text equivalents # surround each with whitespace to enforce consistent use of _ punctuation_to_text = { '<=': 'le', '>=': 'ge', '=<': 'le', '=>': 'ge', '<': 'lt', '>': 'gt', '#': 'num' } for punctuation, punctuation_text in punctuation_to_text.items(): new_col_name = new_col_name.replace(punctuation, punctuation_text) # remove contents within () if not(keep_paren_contents): new_col_name = re.sub('\([^)]*\)', '', new_col_name) # replace remaining punctuation/whitespace with _ punct_pattern = '[\W_]+' punct_replacement = '_' new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name) # remove leading/trailing _ if it exists (if last char was punctuation) new_col_name = new_col_name.strip("_") # TODO: check for empty string # return lower-case version of column name return new_col_name.lower()
[ "def", "_strip_column_name", "(", "col_name", ",", "keep_paren_contents", "=", "True", ")", ":", "# start with input", "new_col_name", "=", "col_name", "# replace meaningful punctuation with text equivalents", "# surround each with whitespace to enforce consistent use of _", "punctuation_to_text", "=", "{", "'<='", ":", "'le'", ",", "'>='", ":", "'ge'", ",", "'=<'", ":", "'le'", ",", "'=>'", ":", "'ge'", ",", "'<'", ":", "'lt'", ",", "'>'", ":", "'gt'", ",", "'#'", ":", "'num'", "}", "for", "punctuation", ",", "punctuation_text", "in", "punctuation_to_text", ".", "items", "(", ")", ":", "new_col_name", "=", "new_col_name", ".", "replace", "(", "punctuation", ",", "punctuation_text", ")", "# remove contents within ()", "if", "not", "(", "keep_paren_contents", ")", ":", "new_col_name", "=", "re", ".", "sub", "(", "'\\([^)]*\\)'", ",", "''", ",", "new_col_name", ")", "# replace remaining punctuation/whitespace with _", "punct_pattern", "=", "'[\\W_]+'", "punct_replacement", "=", "'_'", "new_col_name", "=", "re", ".", "sub", "(", "punct_pattern", ",", "punct_replacement", ",", "new_col_name", ")", "# remove leading/trailing _ if it exists (if last char was punctuation)", "new_col_name", "=", "new_col_name", ".", "strip", "(", "\"_\"", ")", "# TODO: check for empty string", "# return lower-case version of column name", "return", "new_col_name", ".", "lower", "(", ")" ]
Utility script applying several regexs to a string. Intended to be used by `strip_column_names`. This function will: 1. replace informative punctuation components with text 2. (optionally) remove text within parentheses 3. replace remaining punctuation/whitespace with _ 4. strip leading/trailing punctuation/whitespace Parameters ---------- col_name (str): input character string keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns -------- modified string for new field name Examples -------- > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
[ "Utility", "script", "applying", "several", "regexs", "to", "a", "string", ".", "Intended", "to", "be", "used", "by", "strip_column_names", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L84-L142
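Worked examples, with outputs obtained by tracing the regex steps above on names similar to those in the docstring.

_strip_column_name("PD L1 (>1)")                             # -> "pd_l1_gt1"
_strip_column_name("PD L1 (>1)", keep_paren_contents=False)  # -> "pd_l1"
_strip_column_name("PD-L1_")                                 # -> "pd_l1"
_strip_column_name("# mutations")                            # -> "num_mutations"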
hammerlab/cohorts
cohorts/utils.py
strip_column_names
def strip_column_names(cols, keep_paren_contents=True): """ Utility script for renaming pandas columns to patsy-friendly names. Revised names have been: - stripped of all punctuation and whitespace (converted to text or `_`) - converted to lower case Takes a list of column names, returns a dict mapping names to revised names. If there are any concerns with the conversion, this will print a warning & return original column names. Parameters ---------- cols (list): list of strings containing column names keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns ------- dict mapping col_names -> new_col_names Example ------- > df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']), 'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']), } > df = pd.DataFrame(df) > df = df.rename(columns = strip_column_names(df.columns)) ## observe, by comparison > df2 = df.rename(columns = strip_column_names(df.columns, keep_paren_contents=False)) """ # strip/replace punctuation new_cols = [ _strip_column_name(col, keep_paren_contents=keep_paren_contents) for col in cols] if len(new_cols) != len(set(new_cols)): warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.' warn_str += ' Reverting column names to the original.' warnings.warn(warn_str, Warning) print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.') return dict(zip(cols, cols)) return dict(zip(cols, new_cols))
python
def strip_column_names(cols, keep_paren_contents=True): """ Utility script for renaming pandas columns to patsy-friendly names. Revised names have been: - stripped of all punctuation and whitespace (converted to text or `_`) - converted to lower case Takes a list of column names, returns a dict mapping names to revised names. If there are any concerns with the conversion, this will print a warning & return original column names. Parameters ---------- cols (list): list of strings containing column names keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns ------- dict mapping col_names -> new_col_names Example ------- > df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']), 'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']), } > df = pd.DataFrame(df) > df = df.rename(columns = strip_column_names(df.columns)) ## observe, by comparison > df2 = df.rename(columns = strip_column_names(df.columns, keep_paren_contents=False)) """ # strip/replace punctuation new_cols = [ _strip_column_name(col, keep_paren_contents=keep_paren_contents) for col in cols] if len(new_cols) != len(set(new_cols)): warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.' warn_str += ' Reverting column names to the original.' warnings.warn(warn_str, Warning) print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.') return dict(zip(cols, cols)) return dict(zip(cols, new_cols))
[ "def", "strip_column_names", "(", "cols", ",", "keep_paren_contents", "=", "True", ")", ":", "# strip/replace punctuation", "new_cols", "=", "[", "_strip_column_name", "(", "col", ",", "keep_paren_contents", "=", "keep_paren_contents", ")", "for", "col", "in", "cols", "]", "if", "len", "(", "new_cols", ")", "!=", "len", "(", "set", "(", "new_cols", ")", ")", ":", "warn_str", "=", "'Warning: strip_column_names (if run) would introduce duplicate names.'", "warn_str", "+=", "' Reverting column names to the original.'", "warnings", ".", "warn", "(", "warn_str", ",", "Warning", ")", "print", "(", "'Warning: strip_column_names would introduce duplicate names. Please fix & try again.'", ")", "return", "dict", "(", "zip", "(", "cols", ",", "cols", ")", ")", "return", "dict", "(", "zip", "(", "cols", ",", "new_cols", ")", ")" ]
Utility script for renaming pandas columns to patsy-friendly names. Revised names have been: - stripped of all punctuation and whitespace (converted to text or `_`) - converted to lower case Takes a list of column names, returns a dict mapping names to revised names. If there are any concerns with the conversion, this will print a warning & return original column names. Parameters ---------- cols (list): list of strings containing column names keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns ------- dict mapping col_names -> new_col_names Example ------- > df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']), 'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']), } > df = pd.DataFrame(df) > df = df.rename(columns = strip_column_names(df.columns)) ## observe, by comparison > df2 = df.rename(columns = strip_column_names(df.columns, keep_paren_contents=False))
[ "Utility", "script", "for", "renaming", "pandas", "columns", "to", "patsy", "-", "friendly", "names", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L144-L201
hammerlab/cohorts
cohorts/utils.py
set_attributes
def set_attributes(obj, additional_data): """ Given an object and a dictionary, give the object new attributes from that dictionary. Uses _strip_column_name to get rid of whitespace/uppercase/special characters. """ for key, value in additional_data.items(): if hasattr(obj, key): raise ValueError("Key %s in additional_data already exists in this object" % key) setattr(obj, _strip_column_name(key), value)
python
def set_attributes(obj, additional_data): """ Given an object and a dictionary, give the object new attributes from that dictionary. Uses _strip_column_name to get rid of whitespace/uppercase/special characters. """ for key, value in additional_data.items(): if hasattr(obj, key): raise ValueError("Key %s in additional_data already exists in this object" % key) setattr(obj, _strip_column_name(key), value)
[ "def", "set_attributes", "(", "obj", ",", "additional_data", ")", ":", "for", "key", ",", "value", "in", "additional_data", ".", "items", "(", ")", ":", "if", "hasattr", "(", "obj", ",", "key", ")", ":", "raise", "ValueError", "(", "\"Key %s in additional_data already exists in this object\"", "%", "key", ")", "setattr", "(", "obj", ",", "_strip_column_name", "(", "key", ")", ",", "value", ")" ]
Given an object and a dictionary, give the object new attributes from that dictionary. Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
[ "Given", "an", "object", "and", "a", "dictionary", "give", "the", "object", "new", "attributes", "from", "that", "dictionary", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L210-L219
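A minimal sketch of the helper in use; the object and the dictionary keys are invented.

class Patient(object):
    pass

patient = Patient()
set_attributes(patient, {"PD L1 (>1)": True, "Smoking status": "former"})
# Keys are cleaned with _strip_column_name before being set as attributes:
print(patient.pd_l1_gt1, patient.smoking_status)  # True former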
hammerlab/cohorts
cohorts/utils.py
DataFrameHolder.return_obj
def return_obj(cols, df, return_cols=False): """Construct a DataFrameHolder and then return either that or the DataFrame.""" df_holder = DataFrameHolder(cols=cols, df=df) return df_holder.return_self(return_cols=return_cols)
python
def return_obj(cols, df, return_cols=False): """Construct a DataFrameHolder and then return either that or the DataFrame.""" df_holder = DataFrameHolder(cols=cols, df=df) return df_holder.return_self(return_cols=return_cols)
[ "def", "return_obj", "(", "cols", ",", "df", ",", "return_cols", "=", "False", ")", ":", "df_holder", "=", "DataFrameHolder", "(", "cols", "=", "cols", ",", "df", "=", "df", ")", "return", "df_holder", ".", "return_self", "(", "return_cols", "=", "return_cols", ")" ]
Construct a DataFrameHolder and then return either that or the DataFrame.
[ "Construct", "a", "DataFrameHolder", "and", "then", "return", "either", "that", "or", "the", "DataFrame", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L54-L57
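A hedged usage sketch: return_self is not shown in this record, so the shape of the return_cols=True result below is an assumption about the rest of the DataFrameHolder class; the example data is invented.

import pandas as pd

cols = ["snv_count", "indel_count"]
df = pd.DataFrame({"patient_id": ["p1", "p2"],
                   "snv_count": [120, 85],
                   "indel_count": [4, 7]})

just_df = DataFrameHolder.return_obj(cols, df)                         # plain DataFrame back
df_and_cols = DataFrameHolder.return_obj(cols, df, return_cols=True)   # presumably (df, cols)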
hammerlab/cohorts
cohorts/provenance.py
compare_provenance
def compare_provenance( this_provenance, other_provenance, left_outer_diff = "In current but not comparison", right_outer_diff = "In comparison but not current"): """Utility function to compare two abritrary provenance dicts returns number of discrepancies. Parameters ---------- this_provenance: provenance dict (to be compared to "other_provenance") other_provenance: comparison provenance dict (optional) left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance Returns ----------- Number of discrepancies (0: None) """ ## if either this or other items is null, return 0 if (not this_provenance or not other_provenance): return 0 this_items = set(this_provenance.items()) other_items = set(other_provenance.items()) # Two-way diff: are any modules introduced, and are any modules lost? new_diff = this_items.difference(other_items) old_diff = other_items.difference(this_items) warn_str = "" if len(new_diff) > 0: warn_str += "%s: %s" % ( left_outer_diff, _provenance_str(new_diff)) if len(old_diff) > 0: warn_str += "%s: %s" % ( right_outer_diff, _provenance_str(old_diff)) if len(warn_str) > 0: warnings.warn(warn_str, Warning) return(len(new_diff)+len(old_diff))
python
def compare_provenance( this_provenance, other_provenance, left_outer_diff = "In current but not comparison", right_outer_diff = "In comparison but not current"): """Utility function to compare two abritrary provenance dicts returns number of discrepancies. Parameters ---------- this_provenance: provenance dict (to be compared to "other_provenance") other_provenance: comparison provenance dict (optional) left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance Returns ----------- Number of discrepancies (0: None) """ ## if either this or other items is null, return 0 if (not this_provenance or not other_provenance): return 0 this_items = set(this_provenance.items()) other_items = set(other_provenance.items()) # Two-way diff: are any modules introduced, and are any modules lost? new_diff = this_items.difference(other_items) old_diff = other_items.difference(this_items) warn_str = "" if len(new_diff) > 0: warn_str += "%s: %s" % ( left_outer_diff, _provenance_str(new_diff)) if len(old_diff) > 0: warn_str += "%s: %s" % ( right_outer_diff, _provenance_str(old_diff)) if len(warn_str) > 0: warnings.warn(warn_str, Warning) return(len(new_diff)+len(old_diff))
[ "def", "compare_provenance", "(", "this_provenance", ",", "other_provenance", ",", "left_outer_diff", "=", "\"In current but not comparison\"", ",", "right_outer_diff", "=", "\"In comparison but not current\"", ")", ":", "## if either this or other items is null, return 0", "if", "(", "not", "this_provenance", "or", "not", "other_provenance", ")", ":", "return", "0", "this_items", "=", "set", "(", "this_provenance", ".", "items", "(", ")", ")", "other_items", "=", "set", "(", "other_provenance", ".", "items", "(", ")", ")", "# Two-way diff: are any modules introduced, and are any modules lost?", "new_diff", "=", "this_items", ".", "difference", "(", "other_items", ")", "old_diff", "=", "other_items", ".", "difference", "(", "this_items", ")", "warn_str", "=", "\"\"", "if", "len", "(", "new_diff", ")", ">", "0", ":", "warn_str", "+=", "\"%s: %s\"", "%", "(", "left_outer_diff", ",", "_provenance_str", "(", "new_diff", ")", ")", "if", "len", "(", "old_diff", ")", ">", "0", ":", "warn_str", "+=", "\"%s: %s\"", "%", "(", "right_outer_diff", ",", "_provenance_str", "(", "old_diff", ")", ")", "if", "len", "(", "warn_str", ")", ">", "0", ":", "warnings", ".", "warn", "(", "warn_str", ",", "Warning", ")", "return", "(", "len", "(", "new_diff", ")", "+", "len", "(", "old_diff", ")", ")" ]
Utility function to compare two abritrary provenance dicts returns number of discrepancies. Parameters ---------- this_provenance: provenance dict (to be compared to "other_provenance") other_provenance: comparison provenance dict (optional) left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance Returns ----------- Number of discrepancies (0: None)
[ "Utility", "function", "to", "compare", "two", "abritrary", "provenance", "dicts", "returns", "number", "of", "discrepancies", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/provenance.py#L22-L65
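A worked example with invented package versions (the package names echo the hammerlab ecosystem but are only illustrative).

current = {"varcode": "0.5.9", "pyensembl": "1.1.0", "topiary": "0.1.0"}
comparison = {"varcode": "0.5.9", "pyensembl": "1.0.3"}

# pyensembl differs (one entry in each outer diff) and topiary only exists in `current`,
# so three discrepancies are counted and a warning describing them is emitted.
num_discrepancies = compare_provenance(current, comparison)
assert num_discrepancies == 3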
hammerlab/cohorts
cohorts/survival.py
_plot_kmf_single
def _plot_kmf_single(df, condition_col, survival_col, censor_col, threshold, title, xlabel, ylabel, ax, with_condition_color, no_condition_color, with_condition_label, no_condition_label, color_map, label_map, color_palette, ci_show, print_as_title): """ Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col. All inputs are required - this function is intended to be called by `plot_kmf`. """ # make color inputs consistent hex format if colors.is_color_like(with_condition_color): with_condition_color = colors.to_hex(with_condition_color) if colors.is_color_like(no_condition_color): no_condition_color = colors.to_hex(no_condition_color) ## prepare data to be plotted; producing 3 outputs: # - `condition`, series containing category labels to be plotted # - `label_map` (mapping condition values to plot labels) # - `color_map` (mapping condition values to plotted colors) if threshold is not None: is_median = threshold == "median" if is_median: threshold = df[condition_col].median() label_suffix = float_str(threshold) condition = df[condition_col] > threshold default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix) if is_median: label_suffix += " (median)" default_label_with_condition = "%s > %s" % (condition_col, label_suffix) with_condition_label = with_condition_label or default_label_with_condition no_condition_label = no_condition_label or default_label_no_condition if not label_map: label_map = {False: no_condition_label, True: with_condition_label} if not color_map: color_map = {False: no_condition_color, True: with_condition_color} elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category": condition = df[condition_col].astype("category") if not label_map: label_map = dict() [label_map.update({condition_value: '{} = {}'.format(condition_col, condition_value)}) for condition_value in condition.unique()] if not color_map: rgb_values = sb.color_palette(color_palette, len(label_map.keys())) hex_values = [colors.to_hex(col) for col in rgb_values] color_map = dict(zip(label_map.keys(), hex_values)) elif df[condition_col].dtype == 'bool': condition = df[condition_col] default_label_with_condition = "= {}".format(condition_col) default_label_no_condition = "¬ {}".format(condition_col) with_condition_label = with_condition_label or default_label_with_condition no_condition_label = no_condition_label or default_label_no_condition if not label_map: label_map = {False: no_condition_label, True: with_condition_label} if not color_map: color_map = {False: no_condition_color, True: with_condition_color} else: raise ValueError('Don\'t know how to plot data of type\ {}'.format(df[condition_col].dtype)) # produce kmf plot for each category (group) identified above kmf = KaplanMeierFitter() grp_desc = list() grp_survival_data = dict() grp_event_data = dict() grp_names = list(condition.unique()) for grp_name, grp_df in df.groupby(condition): grp_survival = grp_df[survival_col] grp_event = (grp_df[censor_col].astype(bool)) grp_label = label_map[grp_name] grp_color = color_map[grp_name] kmf.fit(grp_survival, grp_event, label=grp_label) desc_str = "# {}: {}".format(grp_label, len(grp_survival)) grp_desc.append(desc_str) grp_survival_data[grp_name] = grp_survival grp_event_data[grp_name] = grp_event if ax: ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color) else: ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color) ## format the plot # Set the y-axis to range 0 to 1 ax.set_ylim(0, 
1) y_tick_vals = ax.get_yticks() ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals]) # plot title if title: ax.set_title(title) elif print_as_title: ax.set_title(' | '.join(grp_desc)) else: [print(desc) for desc in grp_desc] # axis labels if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) ## summarize analytical version of results ## again using same groups as are plotted if len(grp_names) == 2: # use log-rank test for 2 groups results = logrank_test(grp_survival_data[grp_names[0]], grp_survival_data[grp_names[1]], event_observed_A=grp_event_data[grp_names[0]], event_observed_B=grp_event_data[grp_names[1]]) elif len(grp_names) == 1: # no analytical result for 1 or 0 groups results = NullSurvivalResults() else: # cox PH fitter for >2 groups cf = CoxPHFitter() cox_df = patsy.dmatrix('+'.join([condition_col, survival_col, censor_col]), df, return_type='dataframe') del cox_df['Intercept'] results = cf.fit(cox_df, survival_col, event_col=censor_col) results.print_summary() # add metadata to results object so caller can print them results.survival_data_series = grp_survival_data results.event_data_series = grp_event_data results.desc = grp_desc return results
python
def _plot_kmf_single(df, condition_col, survival_col, censor_col, threshold, title, xlabel, ylabel, ax, with_condition_color, no_condition_color, with_condition_label, no_condition_label, color_map, label_map, color_palette, ci_show, print_as_title): """ Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col. All inputs are required - this function is intended to be called by `plot_kmf`. """ # make color inputs consistent hex format if colors.is_color_like(with_condition_color): with_condition_color = colors.to_hex(with_condition_color) if colors.is_color_like(no_condition_color): no_condition_color = colors.to_hex(no_condition_color) ## prepare data to be plotted; producing 3 outputs: # - `condition`, series containing category labels to be plotted # - `label_map` (mapping condition values to plot labels) # - `color_map` (mapping condition values to plotted colors) if threshold is not None: is_median = threshold == "median" if is_median: threshold = df[condition_col].median() label_suffix = float_str(threshold) condition = df[condition_col] > threshold default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix) if is_median: label_suffix += " (median)" default_label_with_condition = "%s > %s" % (condition_col, label_suffix) with_condition_label = with_condition_label or default_label_with_condition no_condition_label = no_condition_label or default_label_no_condition if not label_map: label_map = {False: no_condition_label, True: with_condition_label} if not color_map: color_map = {False: no_condition_color, True: with_condition_color} elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category": condition = df[condition_col].astype("category") if not label_map: label_map = dict() [label_map.update({condition_value: '{} = {}'.format(condition_col, condition_value)}) for condition_value in condition.unique()] if not color_map: rgb_values = sb.color_palette(color_palette, len(label_map.keys())) hex_values = [colors.to_hex(col) for col in rgb_values] color_map = dict(zip(label_map.keys(), hex_values)) elif df[condition_col].dtype == 'bool': condition = df[condition_col] default_label_with_condition = "= {}".format(condition_col) default_label_no_condition = "¬ {}".format(condition_col) with_condition_label = with_condition_label or default_label_with_condition no_condition_label = no_condition_label or default_label_no_condition if not label_map: label_map = {False: no_condition_label, True: with_condition_label} if not color_map: color_map = {False: no_condition_color, True: with_condition_color} else: raise ValueError('Don\'t know how to plot data of type\ {}'.format(df[condition_col].dtype)) # produce kmf plot for each category (group) identified above kmf = KaplanMeierFitter() grp_desc = list() grp_survival_data = dict() grp_event_data = dict() grp_names = list(condition.unique()) for grp_name, grp_df in df.groupby(condition): grp_survival = grp_df[survival_col] grp_event = (grp_df[censor_col].astype(bool)) grp_label = label_map[grp_name] grp_color = color_map[grp_name] kmf.fit(grp_survival, grp_event, label=grp_label) desc_str = "# {}: {}".format(grp_label, len(grp_survival)) grp_desc.append(desc_str) grp_survival_data[grp_name] = grp_survival grp_event_data[grp_name] = grp_event if ax: ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color) else: ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color) ## format the plot # Set the y-axis to range 0 to 1 ax.set_ylim(0, 
1) y_tick_vals = ax.get_yticks() ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals]) # plot title if title: ax.set_title(title) elif print_as_title: ax.set_title(' | '.join(grp_desc)) else: [print(desc) for desc in grp_desc] # axis labels if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) ## summarize analytical version of results ## again using same groups as are plotted if len(grp_names) == 2: # use log-rank test for 2 groups results = logrank_test(grp_survival_data[grp_names[0]], grp_survival_data[grp_names[1]], event_observed_A=grp_event_data[grp_names[0]], event_observed_B=grp_event_data[grp_names[1]]) elif len(grp_names) == 1: # no analytical result for 1 or 0 groups results = NullSurvivalResults() else: # cox PH fitter for >2 groups cf = CoxPHFitter() cox_df = patsy.dmatrix('+'.join([condition_col, survival_col, censor_col]), df, return_type='dataframe') del cox_df['Intercept'] results = cf.fit(cox_df, survival_col, event_col=censor_col) results.print_summary() # add metadata to results object so caller can print them results.survival_data_series = grp_survival_data results.event_data_series = grp_event_data results.desc = grp_desc return results
[ "def", "_plot_kmf_single", "(", "df", ",", "condition_col", ",", "survival_col", ",", "censor_col", ",", "threshold", ",", "title", ",", "xlabel", ",", "ylabel", ",", "ax", ",", "with_condition_color", ",", "no_condition_color", ",", "with_condition_label", ",", "no_condition_label", ",", "color_map", ",", "label_map", ",", "color_palette", ",", "ci_show", ",", "print_as_title", ")", ":", "# make color inputs consistent hex format", "if", "colors", ".", "is_color_like", "(", "with_condition_color", ")", ":", "with_condition_color", "=", "colors", ".", "to_hex", "(", "with_condition_color", ")", "if", "colors", ".", "is_color_like", "(", "no_condition_color", ")", ":", "no_condition_color", "=", "colors", ".", "to_hex", "(", "no_condition_color", ")", "## prepare data to be plotted; producing 3 outputs:", "# - `condition`, series containing category labels to be plotted", "# - `label_map` (mapping condition values to plot labels)", "# - `color_map` (mapping condition values to plotted colors)", "if", "threshold", "is", "not", "None", ":", "is_median", "=", "threshold", "==", "\"median\"", "if", "is_median", ":", "threshold", "=", "df", "[", "condition_col", "]", ".", "median", "(", ")", "label_suffix", "=", "float_str", "(", "threshold", ")", "condition", "=", "df", "[", "condition_col", "]", ">", "threshold", "default_label_no_condition", "=", "\"%s ≤ %s\" %", "(", "o", "ndition_col, ", "l", "bel_suffix)", "", "if", "is_median", ":", "label_suffix", "+=", "\" (median)\"", "default_label_with_condition", "=", "\"%s > %s\"", "%", "(", "condition_col", ",", "label_suffix", ")", "with_condition_label", "=", "with_condition_label", "or", "default_label_with_condition", "no_condition_label", "=", "no_condition_label", "or", "default_label_no_condition", "if", "not", "label_map", ":", "label_map", "=", "{", "False", ":", "no_condition_label", ",", "True", ":", "with_condition_label", "}", "if", "not", "color_map", ":", "color_map", "=", "{", "False", ":", "no_condition_color", ",", "True", ":", "with_condition_color", "}", "elif", "df", "[", "condition_col", "]", ".", "dtype", "==", "'O'", "or", "df", "[", "condition_col", "]", ".", "dtype", ".", "name", "==", "\"category\"", ":", "condition", "=", "df", "[", "condition_col", "]", ".", "astype", "(", "\"category\"", ")", "if", "not", "label_map", ":", "label_map", "=", "dict", "(", ")", "[", "label_map", ".", "update", "(", "{", "condition_value", ":", "'{} = {}'", ".", "format", "(", "condition_col", ",", "condition_value", ")", "}", ")", "for", "condition_value", "in", "condition", ".", "unique", "(", ")", "]", "if", "not", "color_map", ":", "rgb_values", "=", "sb", ".", "color_palette", "(", "color_palette", ",", "len", "(", "label_map", ".", "keys", "(", ")", ")", ")", "hex_values", "=", "[", "colors", ".", "to_hex", "(", "col", ")", "for", "col", "in", "rgb_values", "]", "color_map", "=", "dict", "(", "zip", "(", "label_map", ".", "keys", "(", ")", ",", "hex_values", ")", ")", "elif", "df", "[", "condition_col", "]", ".", "dtype", "==", "'bool'", ":", "condition", "=", "df", "[", "condition_col", "]", "default_label_with_condition", "=", "\"= {}\"", ".", "format", "(", "condition_col", ")", "default_label_no_condition", "=", "\"¬ {}\".", "f", "ormat(", "c", "ondition_col)", "", "with_condition_label", "=", "with_condition_label", "or", "default_label_with_condition", "no_condition_label", "=", "no_condition_label", "or", "default_label_no_condition", "if", "not", "label_map", ":", "label_map", "=", "{", "False", ":", "no_condition_label", 
",", "True", ":", "with_condition_label", "}", "if", "not", "color_map", ":", "color_map", "=", "{", "False", ":", "no_condition_color", ",", "True", ":", "with_condition_color", "}", "else", ":", "raise", "ValueError", "(", "'Don\\'t know how to plot data of type\\\n {}'", ".", "format", "(", "df", "[", "condition_col", "]", ".", "dtype", ")", ")", "# produce kmf plot for each category (group) identified above", "kmf", "=", "KaplanMeierFitter", "(", ")", "grp_desc", "=", "list", "(", ")", "grp_survival_data", "=", "dict", "(", ")", "grp_event_data", "=", "dict", "(", ")", "grp_names", "=", "list", "(", "condition", ".", "unique", "(", ")", ")", "for", "grp_name", ",", "grp_df", "in", "df", ".", "groupby", "(", "condition", ")", ":", "grp_survival", "=", "grp_df", "[", "survival_col", "]", "grp_event", "=", "(", "grp_df", "[", "censor_col", "]", ".", "astype", "(", "bool", ")", ")", "grp_label", "=", "label_map", "[", "grp_name", "]", "grp_color", "=", "color_map", "[", "grp_name", "]", "kmf", ".", "fit", "(", "grp_survival", ",", "grp_event", ",", "label", "=", "grp_label", ")", "desc_str", "=", "\"# {}: {}\"", ".", "format", "(", "grp_label", ",", "len", "(", "grp_survival", ")", ")", "grp_desc", ".", "append", "(", "desc_str", ")", "grp_survival_data", "[", "grp_name", "]", "=", "grp_survival", "grp_event_data", "[", "grp_name", "]", "=", "grp_event", "if", "ax", ":", "ax", "=", "kmf", ".", "plot", "(", "ax", "=", "ax", ",", "show_censors", "=", "True", ",", "ci_show", "=", "ci_show", ",", "color", "=", "grp_color", ")", "else", ":", "ax", "=", "kmf", ".", "plot", "(", "show_censors", "=", "True", ",", "ci_show", "=", "ci_show", ",", "color", "=", "grp_color", ")", "## format the plot", "# Set the y-axis to range 0 to 1", "ax", ".", "set_ylim", "(", "0", ",", "1", ")", "y_tick_vals", "=", "ax", ".", "get_yticks", "(", ")", "ax", ".", "set_yticklabels", "(", "[", "\"%d\"", "%", "int", "(", "y_tick_val", "*", "100", ")", "for", "y_tick_val", "in", "y_tick_vals", "]", ")", "# plot title", "if", "title", ":", "ax", ".", "set_title", "(", "title", ")", "elif", "print_as_title", ":", "ax", ".", "set_title", "(", "' | '", ".", "join", "(", "grp_desc", ")", ")", "else", ":", "[", "print", "(", "desc", ")", "for", "desc", "in", "grp_desc", "]", "# axis labels", "if", "xlabel", ":", "ax", ".", "set_xlabel", "(", "xlabel", ")", "if", "ylabel", ":", "ax", ".", "set_ylabel", "(", "ylabel", ")", "## summarize analytical version of results", "## again using same groups as are plotted", "if", "len", "(", "grp_names", ")", "==", "2", ":", "# use log-rank test for 2 groups", "results", "=", "logrank_test", "(", "grp_survival_data", "[", "grp_names", "[", "0", "]", "]", ",", "grp_survival_data", "[", "grp_names", "[", "1", "]", "]", ",", "event_observed_A", "=", "grp_event_data", "[", "grp_names", "[", "0", "]", "]", ",", "event_observed_B", "=", "grp_event_data", "[", "grp_names", "[", "1", "]", "]", ")", "elif", "len", "(", "grp_names", ")", "==", "1", ":", "# no analytical result for 1 or 0 groups", "results", "=", "NullSurvivalResults", "(", ")", "else", ":", "# cox PH fitter for >2 groups", "cf", "=", "CoxPHFitter", "(", ")", "cox_df", "=", "patsy", ".", "dmatrix", "(", "'+'", ".", "join", "(", "[", "condition_col", ",", "survival_col", ",", "censor_col", "]", ")", ",", "df", ",", "return_type", "=", "'dataframe'", ")", "del", "cox_df", "[", "'Intercept'", "]", "results", "=", "cf", ".", "fit", "(", "cox_df", ",", "survival_col", ",", "event_col", "=", "censor_col", ")", "results", ".", 
"print_summary", "(", ")", "# add metadata to results object so caller can print them", "results", ".", "survival_data_series", "=", "grp_survival_data", "results", ".", "event_data_series", "=", "grp_event_data", "results", ".", "desc", "=", "grp_desc", "return", "results" ]
Helper function to produce a single KM survival plot among observations in df, grouped by condition_col. All inputs are required; this function is intended to be called by `plot_kmf`.
[ "Helper", "function", "to", "produce", "a", "single", "KM", "survival", "plot", "among", "observations", "in", "df", "by", "groups", "defined", "by", "condition_col", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/survival.py#L31-L171
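As an editorial aside, the two-group split used in the numeric branch of `_plot_kmf_single` can be reproduced with pandas alone; the column name and values below are invented for illustration.

```python
# Minimal sketch (not part of the library) of how the numeric branch derives
# its two groups: values above the threshold vs. the rest.
import pandas as pd

df = pd.DataFrame({"biomarker": [1.0, 4.0, 9.0, 2.5], "OS": [100, 340, 80, 500]})
threshold = df["biomarker"].median()      # the threshold == "median" case
condition = df["biomarker"] > threshold   # boolean series used for grouping

for group_value, group_df in df.groupby(condition):
    # group_value is False (<= threshold) or True (> threshold)
    print(group_value, len(group_df))
```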
hammerlab/cohorts
cohorts/survival.py
plot_kmf
def plot_kmf(df, condition_col, censor_col, survival_col, strata_col=None, threshold=None, title=None, xlabel=None, ylabel=None, ax=None, with_condition_color="#B38600", no_condition_color="#A941AC", with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette="Set1", ci_show=False, print_as_title=False): """ Plot survival curves by splitting the dataset into two groups based on condition_col. Report results for a log-rank test (if two groups are plotted) or CoxPH survival analysis (if >2 groups) for association with survival. Regarding definition of groups: If condition_col is numeric, values are split into 2 groups. - if threshold is defined, the groups are split on being > or < condition_col - if threshold == 'median', the threshold is set to the median of condition_col If condition_col is categorical or string, results are plotted for each unique value in the dataset. If condition_col is None, results are plotted for all observations Currently, if `strata_col` is given, the results are repeated among each stratum of the df. A truly "stratified" analysis is not yet supported by may be soon. Parameters ---------- df: dataframe condition_col: string, column which contains the condition to split on survival_col: string, column which contains the survival time censor_col: string, strata_col: optional string, denoting column containing data to stratify by (default: None) threshold: int or string, if int, condition_col is thresholded at int, if 'median', condition_col thresholded at its median if 'median-per-strata', & if stratified analysis then condition_col thresholded by strata title: Title for the plot, default None ax: an existing matplotlib ax, optional, default None note: not currently supported when `strata_col` is not None with_condition_color: str, hex code color for the with-condition curve no_condition_color: str, hex code color for the no-condition curve with_condition_label: str, optional, label for True condition case no_condition_label: str, optional, label for False condition case color_map: dict, optional, mapping of hex-values to condition text in the form of {value_name: color_hex_code}. defaults to `sb.color_palette` using `default_color_palette` name, or *_condition_color options in case of boolean operators. label_map: dict, optional, mapping of labels to condition text. defaults to "condition_name = condition_value", or *_condition_label options in case of boolean operators. color_palette: str, optional, name of sb.color_palette to use if color_map not provided. print_as_title: bool, optional, whether or not to print text within the plot's title vs. stdout, default False """ # set reasonable default threshold value depending on type of condition_col if threshold is None: if df[condition_col].dtype != "bool" and \ np.issubdtype(df[condition_col].dtype, np.number): threshold = "median" # check inputs for threshold for validity elif isinstance(threshold, numbers.Number): logger.debug("threshold value is numeric") elif threshold not in ("median", "median-per-strata"): raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.") elif threshold == "median-per-strata" and strata_col is None: raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?") # construct kwarg dict to pass to _plot_kmf_single. 
# start with args that do not vary according to strata_col arglist = dict( condition_col=condition_col, survival_col=survival_col, censor_col=censor_col, threshold=threshold, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_map=color_map, label_map=label_map, xlabel=xlabel, ylabel=ylabel, ci_show=ci_show, color_palette=color_palette, print_as_title=print_as_title) # if strata_col is None, pass all parameters to _plot_kmf_single if strata_col is None: arglist.update(dict( df=df, title=title, ax=ax)) return _plot_kmf_single(**arglist) else: # prepare for stratified analysis if threshold == "median": # by default, "median" threshold should be intra-strata median arglist["threshold"] = df[condition_col].dropna().median() elif threshold == "median-per-strata": arglist["threshold"] = "median" # create axis / subplots for stratified results if ax is not None: raise ValueError("ax not supported with stratified analysis.") n_strata = len(df[strata_col].unique()) f, ax = plt.subplots(n_strata, sharex=True) # create results dict to hold per-strata results results = dict() # call _plot_kmf_single for each of the strata for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)): if n_strata == 1: arglist["ax"] = ax else: arglist["ax"] = ax[i] subtitle = "{}: {}".format(strata_col, strat_name) arglist["title"] = subtitle arglist["df"] = strat_df results[subtitle] = plot_kmf(**arglist) [print(desc) for desc in results[subtitle].desc] if title: f.suptitle(title) return results
python
def plot_kmf(df, condition_col, censor_col, survival_col, strata_col=None, threshold=None, title=None, xlabel=None, ylabel=None, ax=None, with_condition_color="#B38600", no_condition_color="#A941AC", with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette="Set1", ci_show=False, print_as_title=False): """ Plot survival curves by splitting the dataset into two groups based on condition_col. Report results for a log-rank test (if two groups are plotted) or CoxPH survival analysis (if >2 groups) for association with survival. Regarding definition of groups: If condition_col is numeric, values are split into 2 groups. - if threshold is defined, the groups are split on being > or < condition_col - if threshold == 'median', the threshold is set to the median of condition_col If condition_col is categorical or string, results are plotted for each unique value in the dataset. If condition_col is None, results are plotted for all observations Currently, if `strata_col` is given, the results are repeated among each stratum of the df. A truly "stratified" analysis is not yet supported by may be soon. Parameters ---------- df: dataframe condition_col: string, column which contains the condition to split on survival_col: string, column which contains the survival time censor_col: string, strata_col: optional string, denoting column containing data to stratify by (default: None) threshold: int or string, if int, condition_col is thresholded at int, if 'median', condition_col thresholded at its median if 'median-per-strata', & if stratified analysis then condition_col thresholded by strata title: Title for the plot, default None ax: an existing matplotlib ax, optional, default None note: not currently supported when `strata_col` is not None with_condition_color: str, hex code color for the with-condition curve no_condition_color: str, hex code color for the no-condition curve with_condition_label: str, optional, label for True condition case no_condition_label: str, optional, label for False condition case color_map: dict, optional, mapping of hex-values to condition text in the form of {value_name: color_hex_code}. defaults to `sb.color_palette` using `default_color_palette` name, or *_condition_color options in case of boolean operators. label_map: dict, optional, mapping of labels to condition text. defaults to "condition_name = condition_value", or *_condition_label options in case of boolean operators. color_palette: str, optional, name of sb.color_palette to use if color_map not provided. print_as_title: bool, optional, whether or not to print text within the plot's title vs. stdout, default False """ # set reasonable default threshold value depending on type of condition_col if threshold is None: if df[condition_col].dtype != "bool" and \ np.issubdtype(df[condition_col].dtype, np.number): threshold = "median" # check inputs for threshold for validity elif isinstance(threshold, numbers.Number): logger.debug("threshold value is numeric") elif threshold not in ("median", "median-per-strata"): raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.") elif threshold == "median-per-strata" and strata_col is None: raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?") # construct kwarg dict to pass to _plot_kmf_single. 
# start with args that do not vary according to strata_col arglist = dict( condition_col=condition_col, survival_col=survival_col, censor_col=censor_col, threshold=threshold, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_map=color_map, label_map=label_map, xlabel=xlabel, ylabel=ylabel, ci_show=ci_show, color_palette=color_palette, print_as_title=print_as_title) # if strata_col is None, pass all parameters to _plot_kmf_single if strata_col is None: arglist.update(dict( df=df, title=title, ax=ax)) return _plot_kmf_single(**arglist) else: # prepare for stratified analysis if threshold == "median": # by default, "median" threshold should be intra-strata median arglist["threshold"] = df[condition_col].dropna().median() elif threshold == "median-per-strata": arglist["threshold"] = "median" # create axis / subplots for stratified results if ax is not None: raise ValueError("ax not supported with stratified analysis.") n_strata = len(df[strata_col].unique()) f, ax = plt.subplots(n_strata, sharex=True) # create results dict to hold per-strata results results = dict() # call _plot_kmf_single for each of the strata for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)): if n_strata == 1: arglist["ax"] = ax else: arglist["ax"] = ax[i] subtitle = "{}: {}".format(strata_col, strat_name) arglist["title"] = subtitle arglist["df"] = strat_df results[subtitle] = plot_kmf(**arglist) [print(desc) for desc in results[subtitle].desc] if title: f.suptitle(title) return results
[ "def", "plot_kmf", "(", "df", ",", "condition_col", ",", "censor_col", ",", "survival_col", ",", "strata_col", "=", "None", ",", "threshold", "=", "None", ",", "title", "=", "None", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "ax", "=", "None", ",", "with_condition_color", "=", "\"#B38600\"", ",", "no_condition_color", "=", "\"#A941AC\"", ",", "with_condition_label", "=", "None", ",", "no_condition_label", "=", "None", ",", "color_map", "=", "None", ",", "label_map", "=", "None", ",", "color_palette", "=", "\"Set1\"", ",", "ci_show", "=", "False", ",", "print_as_title", "=", "False", ")", ":", "# set reasonable default threshold value depending on type of condition_col", "if", "threshold", "is", "None", ":", "if", "df", "[", "condition_col", "]", ".", "dtype", "!=", "\"bool\"", "and", "np", ".", "issubdtype", "(", "df", "[", "condition_col", "]", ".", "dtype", ",", "np", ".", "number", ")", ":", "threshold", "=", "\"median\"", "# check inputs for threshold for validity", "elif", "isinstance", "(", "threshold", ",", "numbers", ".", "Number", ")", ":", "logger", ".", "debug", "(", "\"threshold value is numeric\"", ")", "elif", "threshold", "not", "in", "(", "\"median\"", ",", "\"median-per-strata\"", ")", ":", "raise", "ValueError", "(", "\"invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.\"", ")", "elif", "threshold", "==", "\"median-per-strata\"", "and", "strata_col", "is", "None", ":", "raise", "ValueError", "(", "\"threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?\"", ")", "# construct kwarg dict to pass to _plot_kmf_single.", "# start with args that do not vary according to strata_col", "arglist", "=", "dict", "(", "condition_col", "=", "condition_col", ",", "survival_col", "=", "survival_col", ",", "censor_col", "=", "censor_col", ",", "threshold", "=", "threshold", ",", "with_condition_color", "=", "with_condition_color", ",", "no_condition_color", "=", "no_condition_color", ",", "with_condition_label", "=", "with_condition_label", ",", "no_condition_label", "=", "no_condition_label", ",", "color_map", "=", "color_map", ",", "label_map", "=", "label_map", ",", "xlabel", "=", "xlabel", ",", "ylabel", "=", "ylabel", ",", "ci_show", "=", "ci_show", ",", "color_palette", "=", "color_palette", ",", "print_as_title", "=", "print_as_title", ")", "# if strata_col is None, pass all parameters to _plot_kmf_single", "if", "strata_col", "is", "None", ":", "arglist", ".", "update", "(", "dict", "(", "df", "=", "df", ",", "title", "=", "title", ",", "ax", "=", "ax", ")", ")", "return", "_plot_kmf_single", "(", "*", "*", "arglist", ")", "else", ":", "# prepare for stratified analysis", "if", "threshold", "==", "\"median\"", ":", "# by default, \"median\" threshold should be intra-strata median", "arglist", "[", "\"threshold\"", "]", "=", "df", "[", "condition_col", "]", ".", "dropna", "(", ")", ".", "median", "(", ")", "elif", "threshold", "==", "\"median-per-strata\"", ":", "arglist", "[", "\"threshold\"", "]", "=", "\"median\"", "# create axis / subplots for stratified results", "if", "ax", "is", "not", "None", ":", "raise", "ValueError", "(", "\"ax not supported with stratified analysis.\"", ")", "n_strata", "=", "len", "(", "df", "[", "strata_col", "]", ".", "unique", "(", ")", ")", "f", ",", "ax", "=", "plt", ".", "subplots", "(", "n_strata", ",", "sharex", "=", "True", ")", "# create results dict to hold per-strata results", "results", "=", "dict", "(", ")", "# call _plot_kmf_single for each of 
the strata", "for", "i", ",", "(", "strat_name", ",", "strat_df", ")", "in", "enumerate", "(", "df", ".", "groupby", "(", "strata_col", ")", ")", ":", "if", "n_strata", "==", "1", ":", "arglist", "[", "\"ax\"", "]", "=", "ax", "else", ":", "arglist", "[", "\"ax\"", "]", "=", "ax", "[", "i", "]", "subtitle", "=", "\"{}: {}\"", ".", "format", "(", "strata_col", ",", "strat_name", ")", "arglist", "[", "\"title\"", "]", "=", "subtitle", "arglist", "[", "\"df\"", "]", "=", "strat_df", "results", "[", "subtitle", "]", "=", "plot_kmf", "(", "*", "*", "arglist", ")", "[", "print", "(", "desc", ")", "for", "desc", "in", "results", "[", "subtitle", "]", ".", "desc", "]", "if", "title", ":", "f", ".", "suptitle", "(", "title", ")", "return", "results" ]
Plot survival curves by splitting the dataset into two groups based on condition_col. Report results for a log-rank test (if two groups are plotted) or CoxPH survival analysis (if >2 groups) for association with survival. Regarding definition of groups: If condition_col is numeric, values are split into 2 groups. - if threshold is defined, the groups are split on condition_col being > or <= threshold - if threshold == 'median', the threshold is set to the median of condition_col If condition_col is categorical or string, results are plotted for each unique value in the dataset. If condition_col is None, results are plotted for all observations. Currently, if `strata_col` is given, the results are repeated among each stratum of the df. A truly "stratified" analysis is not yet supported but may be soon. Parameters ---------- df: dataframe condition_col: string, column which contains the condition to split on survival_col: string, column which contains the survival time censor_col: string, strata_col: optional string, denoting column containing data to stratify by (default: None) threshold: int or string, if int, condition_col is thresholded at int, if 'median', condition_col thresholded at its median if 'median-per-strata', & if stratified analysis then condition_col thresholded by strata title: Title for the plot, default None ax: an existing matplotlib ax, optional, default None note: not currently supported when `strata_col` is not None with_condition_color: str, hex code color for the with-condition curve no_condition_color: str, hex code color for the no-condition curve with_condition_label: str, optional, label for True condition case no_condition_label: str, optional, label for False condition case color_map: dict, optional, mapping of hex-values to condition text in the form of {value_name: color_hex_code}. defaults to `sb.color_palette` using `default_color_palette` name, or *_condition_color options in case of boolean operators. label_map: dict, optional, mapping of labels to condition text. defaults to "condition_name = condition_value", or *_condition_label options in case of boolean operators. color_palette: str, optional, name of sb.color_palette to use if color_map not provided. print_as_title: bool, optional, whether or not to print text within the plot's title vs. stdout, default False
[ "Plot", "survival", "curves", "by", "splitting", "the", "dataset", "into", "two", "groups", "based", "on", "condition_col", ".", "Report", "results", "for", "a", "log", "-", "rank", "test", "(", "if", "two", "groups", "are", "plotted", ")", "or", "CoxPH", "survival", "analysis", "(", "if", ">", "2", "groups", ")", "for", "association", "with", "survival", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/survival.py#L174-L307
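A hedged usage sketch of `plot_kmf` follows; it assumes the function is importable from `cohorts.survival` (matching the path above) and that lifelines, matplotlib and seaborn are installed. The toy DataFrame and its column values are invented.

```python
# Hypothetical usage sketch; not taken from the repository's own examples.
import pandas as pd
from cohorts.survival import plot_kmf  # assumed import path

df = pd.DataFrame({
    "OS": [120, 300, 450, 90, 600, 210],                  # survival time
    "deceased": [True, False, True, True, False, True],   # event indicator
    "age": [55, 62, 48, 71, 59, 66],                      # numeric condition column
})

# A numeric condition_col with no explicit threshold defaults to a median
# split; two groups are plotted and a log-rank test result is returned.
results = plot_kmf(df, condition_col="age", censor_col="deceased", survival_col="OS")
```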
alvarogzp/telegram-bot-framework
bot/action/util/textformat.py
FormattedText.concat
def concat(self, formatted_text): """:type formatted_text: FormattedText""" assert self._is_compatible(formatted_text), "Cannot concat text with different modes" self.text += formatted_text.text return self
python
def concat(self, formatted_text): """:type formatted_text: FormattedText""" assert self._is_compatible(formatted_text), "Cannot concat text with different modes" self.text += formatted_text.text return self
[ "def", "concat", "(", "self", ",", "formatted_text", ")", ":", "assert", "self", ".", "_is_compatible", "(", "formatted_text", ")", ",", "\"Cannot concat text with different modes\"", "self", ".", "text", "+=", "formatted_text", ".", "text", "return", "self" ]
:type formatted_text: FormattedText
[ ":", "type", "formatted_text", ":", "FormattedText" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L42-L46
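The concat pattern above (refuse to mix parse modes, then append and return self for chaining) can be illustrated without the framework; the class below is a simplified stand-in with assumed names, not the library's API.

```python
# Stripped-down illustration of the concat pattern: same-mode check, append,
# return self so calls can be chained.
class Formatted:
    def __init__(self, text="", mode="HTML"):
        self.text = text
        self.mode = mode

    def _is_compatible(self, other):
        return self.mode == other.mode

    def concat(self, other):
        assert self._is_compatible(other), "Cannot concat text with different modes"
        self.text += other.text
        return self  # returning self allows chained calls

greeting = Formatted("Hello, ").concat(Formatted("world"))
print(greeting.text)  # Hello, world
```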
alvarogzp/telegram-bot-framework
bot/action/util/textformat.py
FormattedText.join
def join(self, formatted_texts): """:type formatted_texts: list[FormattedText]""" formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator for formatted_text in formatted_texts: assert self._is_compatible(formatted_text), "Cannot join text with different modes" self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts)) return self
python
def join(self, formatted_texts): """:type formatted_texts: list[FormattedText]""" formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator for formatted_text in formatted_texts: assert self._is_compatible(formatted_text), "Cannot join text with different modes" self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts)) return self
[ "def", "join", "(", "self", ",", "formatted_texts", ")", ":", "formatted_texts", "=", "list", "(", "formatted_texts", ")", "# so that after the first iteration elements are not lost if generator", "for", "formatted_text", "in", "formatted_texts", ":", "assert", "self", ".", "_is_compatible", "(", "formatted_text", ")", ",", "\"Cannot join text with different modes\"", "self", ".", "text", "=", "self", ".", "text", ".", "join", "(", "(", "formatted_text", ".", "text", "for", "formatted_text", "in", "formatted_texts", ")", ")", "return", "self" ]
:type formatted_texts: list[FormattedText]
[ ":", "type", "formatted_texts", ":", "list", "[", "FormattedText", "]" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L48-L54
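The comment in `join` about generators is worth a sketch: the compatibility pass would otherwise consume a generator before the actual join. The helper below is a simplified, assumed restatement of that idea, not the framework's code.

```python
# Materialize the input first, because a generator can only be iterated once.
def join_texts(separator, texts, mode="HTML"):
    texts = list(texts)  # keep elements available for both passes
    assert all(t["mode"] == mode for t in texts), "Cannot join text with different modes"
    return separator.join(t["text"] for t in texts)

parts = ({"text": w, "mode": "HTML"} for w in ("a", "b", "c"))  # a generator
print(join_texts(", ", parts))  # a, b, c
```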
alvarogzp/telegram-bot-framework
bot/action/util/textformat.py
FormattedTextStringFormat.concat
def concat(self, *args, **kwargs): """ :type args: FormattedText :type kwargs: FormattedText """ for arg in args: assert self.formatted_text._is_compatible(arg), "Cannot concat text with different modes" self.format_args.append(arg.text) for kwarg in kwargs: value = kwargs[kwarg] assert self.formatted_text._is_compatible(value), "Cannot concat text with different modes" self.format_kwargs[kwarg] = value.text return self
python
def concat(self, *args, **kwargs): """ :type args: FormattedText :type kwargs: FormattedText """ for arg in args: assert self.formatted_text._is_compatible(arg), "Cannot concat text with different modes" self.format_args.append(arg.text) for kwarg in kwargs: value = kwargs[kwarg] assert self.formatted_text._is_compatible(value), "Cannot concat text with different modes" self.format_kwargs[kwarg] = value.text return self
[ "def", "concat", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "args", ":", "assert", "self", ".", "formatted_text", ".", "_is_compatible", "(", "arg", ")", ",", "\"Cannot concat text with different modes\"", "self", ".", "format_args", ".", "append", "(", "arg", ".", "text", ")", "for", "kwarg", "in", "kwargs", ":", "value", "=", "kwargs", "[", "kwarg", "]", "assert", "self", ".", "formatted_text", ".", "_is_compatible", "(", "value", ")", ",", "\"Cannot concat text with different modes\"", "self", ".", "format_kwargs", "[", "kwarg", "]", "=", "value", ".", "text", "return", "self" ]
:type args: FormattedText :type kwargs: FormattedText
[ ":", "type", "args", ":", "FormattedText", ":", "type", "kwargs", ":", "FormattedText" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/util/textformat.py#L251-L263
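A minimal, assumed illustration of the accumulate-then-format pattern used by `FormattedTextStringFormat.concat`: positional and keyword pieces are collected now and applied to the template later.

```python
# Simplified stand-in (not the framework's class): collect format arguments,
# then render the template in one final str.format call.
class StringFormat:
    def __init__(self, template):
        self.template = template
        self.args, self.kwargs = [], {}

    def concat(self, *args, **kwargs):
        self.args.extend(args)
        self.kwargs.update(kwargs)
        return self

    def build(self):
        return self.template.format(*self.args, **self.kwargs)

print(StringFormat("{} scored {points}").concat("Alice").concat(points=10).build())
# Alice scored 10
```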
hammerlab/cohorts
cohorts/random.py
random_cohort
def random_cohort(size, cache_dir, data_dir=None, min_random_variants=None, max_random_variants=None, seed_val=1234): """ Parameters ---------- min_random_variants: optional, int Minimum number of random variants to be generated per patient. max_random_variants: optional, int Maximum number of random variants to be generated per patient. """ seed(seed_val) d = {} d["id"] = [str(id) for id in range(size)] d["age"] = choice([10, 15, 28, 32, 59, 62, 64, 66, 68], size) d["smoker"] = choice([False, True], size) d["OS"] = [randint(10, 1000) for i in range(size)] d["PFS"] = [int(os * 0.6) for os in d["OS"]] d["benefit"] = [pfs < 50 for pfs in d["PFS"]] d["random"] = [randint(100) for i in range(size)] d["random_boolean"] = choice([False, True], size) d["benefit_correlate"] = [randint(50) if benefit else randint(20) for benefit in d["benefit"]] d["benefit_correlate_boolean"] = [True if corr > 10 else False for corr in d["benefit_correlate"]] d["deceased"] = choice([False, True], size) d["progressed_or_deceased"] = [deceased or choice([False, True]) for deceased in d["deceased"]] df = pd.DataFrame(d) patients = [] for i, row in df.iterrows(): snv_vcf_paths = None if max_random_variants is not None and min_random_variants is not None: if data_dir is None: raise ValueError("data_dir must be provided if random variants are being generated.") vcf_path = path.join(data_dir, "patient_%s_mutect.vcf" % row["id"]) generate_simple_vcf( vcf_path, generate_random_missense_variants(num_variants=randint(min_random_variants, max_random_variants))) snv_vcf_paths = [vcf_path] patient = Patient( id=row["id"], os=row["OS"], pfs=row["PFS"], benefit=row["benefit"], deceased=row["deceased"], progressed_or_deceased=row["progressed_or_deceased"], hla_alleles=["HLA-A02:01"], variants={"snv": snv_vcf_paths}, additional_data=row) patients.append(patient) return Cohort( patients=patients, cache_dir=cache_dir, mhc_class=RandomBindingPredictor)
python
def random_cohort(size, cache_dir, data_dir=None, min_random_variants=None, max_random_variants=None, seed_val=1234): """ Parameters ---------- min_random_variants: optional, int Minimum number of random variants to be generated per patient. max_random_variants: optional, int Maximum number of random variants to be generated per patient. """ seed(seed_val) d = {} d["id"] = [str(id) for id in range(size)] d["age"] = choice([10, 15, 28, 32, 59, 62, 64, 66, 68], size) d["smoker"] = choice([False, True], size) d["OS"] = [randint(10, 1000) for i in range(size)] d["PFS"] = [int(os * 0.6) for os in d["OS"]] d["benefit"] = [pfs < 50 for pfs in d["PFS"]] d["random"] = [randint(100) for i in range(size)] d["random_boolean"] = choice([False, True], size) d["benefit_correlate"] = [randint(50) if benefit else randint(20) for benefit in d["benefit"]] d["benefit_correlate_boolean"] = [True if corr > 10 else False for corr in d["benefit_correlate"]] d["deceased"] = choice([False, True], size) d["progressed_or_deceased"] = [deceased or choice([False, True]) for deceased in d["deceased"]] df = pd.DataFrame(d) patients = [] for i, row in df.iterrows(): snv_vcf_paths = None if max_random_variants is not None and min_random_variants is not None: if data_dir is None: raise ValueError("data_dir must be provided if random variants are being generated.") vcf_path = path.join(data_dir, "patient_%s_mutect.vcf" % row["id"]) generate_simple_vcf( vcf_path, generate_random_missense_variants(num_variants=randint(min_random_variants, max_random_variants))) snv_vcf_paths = [vcf_path] patient = Patient( id=row["id"], os=row["OS"], pfs=row["PFS"], benefit=row["benefit"], deceased=row["deceased"], progressed_or_deceased=row["progressed_or_deceased"], hla_alleles=["HLA-A02:01"], variants={"snv": snv_vcf_paths}, additional_data=row) patients.append(patient) return Cohort( patients=patients, cache_dir=cache_dir, mhc_class=RandomBindingPredictor)
[ "def", "random_cohort", "(", "size", ",", "cache_dir", ",", "data_dir", "=", "None", ",", "min_random_variants", "=", "None", ",", "max_random_variants", "=", "None", ",", "seed_val", "=", "1234", ")", ":", "seed", "(", "seed_val", ")", "d", "=", "{", "}", "d", "[", "\"id\"", "]", "=", "[", "str", "(", "id", ")", "for", "id", "in", "range", "(", "size", ")", "]", "d", "[", "\"age\"", "]", "=", "choice", "(", "[", "10", ",", "15", ",", "28", ",", "32", ",", "59", ",", "62", ",", "64", ",", "66", ",", "68", "]", ",", "size", ")", "d", "[", "\"smoker\"", "]", "=", "choice", "(", "[", "False", ",", "True", "]", ",", "size", ")", "d", "[", "\"OS\"", "]", "=", "[", "randint", "(", "10", ",", "1000", ")", "for", "i", "in", "range", "(", "size", ")", "]", "d", "[", "\"PFS\"", "]", "=", "[", "int", "(", "os", "*", "0.6", ")", "for", "os", "in", "d", "[", "\"OS\"", "]", "]", "d", "[", "\"benefit\"", "]", "=", "[", "pfs", "<", "50", "for", "pfs", "in", "d", "[", "\"PFS\"", "]", "]", "d", "[", "\"random\"", "]", "=", "[", "randint", "(", "100", ")", "for", "i", "in", "range", "(", "size", ")", "]", "d", "[", "\"random_boolean\"", "]", "=", "choice", "(", "[", "False", ",", "True", "]", ",", "size", ")", "d", "[", "\"benefit_correlate\"", "]", "=", "[", "randint", "(", "50", ")", "if", "benefit", "else", "randint", "(", "20", ")", "for", "benefit", "in", "d", "[", "\"benefit\"", "]", "]", "d", "[", "\"benefit_correlate_boolean\"", "]", "=", "[", "True", "if", "corr", ">", "10", "else", "False", "for", "corr", "in", "d", "[", "\"benefit_correlate\"", "]", "]", "d", "[", "\"deceased\"", "]", "=", "choice", "(", "[", "False", ",", "True", "]", ",", "size", ")", "d", "[", "\"progressed_or_deceased\"", "]", "=", "[", "deceased", "or", "choice", "(", "[", "False", ",", "True", "]", ")", "for", "deceased", "in", "d", "[", "\"deceased\"", "]", "]", "df", "=", "pd", ".", "DataFrame", "(", "d", ")", "patients", "=", "[", "]", "for", "i", ",", "row", "in", "df", ".", "iterrows", "(", ")", ":", "snv_vcf_paths", "=", "None", "if", "max_random_variants", "is", "not", "None", "and", "min_random_variants", "is", "not", "None", ":", "if", "data_dir", "is", "None", ":", "raise", "ValueError", "(", "\"data_dir must be provided if random variants are being generated.\"", ")", "vcf_path", "=", "path", ".", "join", "(", "data_dir", ",", "\"patient_%s_mutect.vcf\"", "%", "row", "[", "\"id\"", "]", ")", "generate_simple_vcf", "(", "vcf_path", ",", "generate_random_missense_variants", "(", "num_variants", "=", "randint", "(", "min_random_variants", ",", "max_random_variants", ")", ")", ")", "snv_vcf_paths", "=", "[", "vcf_path", "]", "patient", "=", "Patient", "(", "id", "=", "row", "[", "\"id\"", "]", ",", "os", "=", "row", "[", "\"OS\"", "]", ",", "pfs", "=", "row", "[", "\"PFS\"", "]", ",", "benefit", "=", "row", "[", "\"benefit\"", "]", ",", "deceased", "=", "row", "[", "\"deceased\"", "]", ",", "progressed_or_deceased", "=", "row", "[", "\"progressed_or_deceased\"", "]", ",", "hla_alleles", "=", "[", "\"HLA-A02:01\"", "]", ",", "variants", "=", "{", "\"snv\"", ":", "snv_vcf_paths", "}", ",", "additional_data", "=", "row", ")", "patients", ".", "append", "(", "patient", ")", "return", "Cohort", "(", "patients", "=", "patients", ",", "cache_dir", "=", "cache_dir", ",", "mhc_class", "=", "RandomBindingPredictor", ")" ]
Parameters ---------- min_random_variants: optional, int Minimum number of random variants to be generated per patient. max_random_variants: optional, int Maximum number of random variants to be generated per patient.
[ "Parameters", "----------", "min_random_variants", ":", "optional", "int", "Minimum", "number", "of", "random", "variants", "to", "be", "generated", "per", "patient", ".", "max_random_variants", ":", "optional", "int", "Maximum", "number", "of", "random", "variants", "to", "be", "generated", "per", "patient", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/random.py#L24-L75
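A hypothetical call, assuming `random_cohort` is importable from `cohorts.random` (matching the path above); without the variant-count arguments no VCFs are written, so `data_dir` can be omitted.

```python
# Hypothetical usage sketch; paths and sizes are placeholders.
from cohorts.random import random_cohort  # assumed import path

cohort = random_cohort(size=20, cache_dir="/tmp/random-cohort-cache", seed_val=42)

# With random variants, a writable data_dir is required for the generated VCFs:
# cohort = random_cohort(size=20, cache_dir="/tmp/random-cohort-cache",
#                        data_dir="/tmp/random-cohort-data",
#                        min_random_variants=3, max_random_variants=10)
```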
hammerlab/cohorts
cohorts/random.py
generate_random_missense_variants
def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"): """ Generate a random collection of missense variants by trying random variants repeatedly. """ variants = [] for i in range(max_search): bases = ["A", "C", "T", "G"] random_ref = choice(bases) bases.remove(random_ref) random_alt = choice(bases) random_contig = choice(["1", "2", "3", "4", "5"]) random_variant = Variant(contig=random_contig, start=randint(1, 1000000), ref=random_ref, alt=random_alt, ensembl=reference) try: effects = random_variant.effects() for effect in effects: if isinstance(effect, Substitution): variants.append(random_variant) break except: continue if len(variants) == num_variants: break return VariantCollection(variants)
python
def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"): """ Generate a random collection of missense variants by trying random variants repeatedly. """ variants = [] for i in range(max_search): bases = ["A", "C", "T", "G"] random_ref = choice(bases) bases.remove(random_ref) random_alt = choice(bases) random_contig = choice(["1", "2", "3", "4", "5"]) random_variant = Variant(contig=random_contig, start=randint(1, 1000000), ref=random_ref, alt=random_alt, ensembl=reference) try: effects = random_variant.effects() for effect in effects: if isinstance(effect, Substitution): variants.append(random_variant) break except: continue if len(variants) == num_variants: break return VariantCollection(variants)
[ "def", "generate_random_missense_variants", "(", "num_variants", "=", "10", ",", "max_search", "=", "100000", ",", "reference", "=", "\"GRCh37\"", ")", ":", "variants", "=", "[", "]", "for", "i", "in", "range", "(", "max_search", ")", ":", "bases", "=", "[", "\"A\"", ",", "\"C\"", ",", "\"T\"", ",", "\"G\"", "]", "random_ref", "=", "choice", "(", "bases", ")", "bases", ".", "remove", "(", "random_ref", ")", "random_alt", "=", "choice", "(", "bases", ")", "random_contig", "=", "choice", "(", "[", "\"1\"", ",", "\"2\"", ",", "\"3\"", ",", "\"4\"", ",", "\"5\"", "]", ")", "random_variant", "=", "Variant", "(", "contig", "=", "random_contig", ",", "start", "=", "randint", "(", "1", ",", "1000000", ")", ",", "ref", "=", "random_ref", ",", "alt", "=", "random_alt", ",", "ensembl", "=", "reference", ")", "try", ":", "effects", "=", "random_variant", ".", "effects", "(", ")", "for", "effect", "in", "effects", ":", "if", "isinstance", "(", "effect", ",", "Substitution", ")", ":", "variants", ".", "append", "(", "random_variant", ")", "break", "except", ":", "continue", "if", "len", "(", "variants", ")", "==", "num_variants", ":", "break", "return", "VariantCollection", "(", "variants", ")" ]
Generate a random collection of missense variants by trying random variants repeatedly.
[ "Generate", "a", "random", "collection", "of", "missense", "variants", "by", "trying", "random", "variants", "repeatedly", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/random.py#L77-L100
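Hypothetical usage of the generator above; it rejection-samples random SNVs until it has collected `num_variants` missense (Substitution) effects or runs out of `max_search` attempts, so the returned collection may be smaller than requested.

```python
# Hypothetical call; assumes the same module is importable as cohorts.random
# and that pyensembl reference data for GRCh37 is installed.
from cohorts.random import generate_random_missense_variants

variants = generate_random_missense_variants(num_variants=5, max_search=20000)
print(len(variants))  # at most 5
```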
hammerlab/cohorts
cohorts/random.py
generate_simple_vcf
def generate_simple_vcf(filename, variant_collection): """ Output a very simple metadata-free VCF for each variant in a variant_collection. """ contigs = [] positions = [] refs = [] alts = [] for variant in variant_collection: contigs.append("chr" + variant.contig) positions.append(variant.start) refs.append(variant.ref) alts.append(variant.alt) df = pd.DataFrame() df["contig"] = contigs df["position"] = positions df["id"] = ["."] * len(variant_collection) df["ref"] = refs df["alt"] = alts df["qual"] = ["."] * len(variant_collection) df["filter"] = ["."] * len(variant_collection) df["info"] = ["."] * len(variant_collection) df["format"] = ["GT:AD:DP"] * len(variant_collection) normal_ref_depths = [randint(1, 10) for v in variant_collection] normal_alt_depths = [randint(1, 10) for v in variant_collection] df["n1"] = ["0:%d,%d:%d" % (normal_ref_depths[i], normal_alt_depths[i], normal_ref_depths[i] + normal_alt_depths[i]) for i in range(len(variant_collection))] tumor_ref_depths = [randint(1, 10) for v in variant_collection] tumor_alt_depths = [randint(1, 10) for v in variant_collection] df["t1"] = ["0/1:%d,%d:%d" % (tumor_ref_depths[i], tumor_alt_depths[i], tumor_ref_depths[i] + tumor_alt_depths[i]) for i in range(len(variant_collection))] with open(filename, "w") as f: f.write("##fileformat=VCFv4.1\n") f.write("##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\n") with open(filename, "a") as f: df.to_csv(f, sep="\t", index=None, header=None)
python
def generate_simple_vcf(filename, variant_collection): """ Output a very simple metadata-free VCF for each variant in a variant_collection. """ contigs = [] positions = [] refs = [] alts = [] for variant in variant_collection: contigs.append("chr" + variant.contig) positions.append(variant.start) refs.append(variant.ref) alts.append(variant.alt) df = pd.DataFrame() df["contig"] = contigs df["position"] = positions df["id"] = ["."] * len(variant_collection) df["ref"] = refs df["alt"] = alts df["qual"] = ["."] * len(variant_collection) df["filter"] = ["."] * len(variant_collection) df["info"] = ["."] * len(variant_collection) df["format"] = ["GT:AD:DP"] * len(variant_collection) normal_ref_depths = [randint(1, 10) for v in variant_collection] normal_alt_depths = [randint(1, 10) for v in variant_collection] df["n1"] = ["0:%d,%d:%d" % (normal_ref_depths[i], normal_alt_depths[i], normal_ref_depths[i] + normal_alt_depths[i]) for i in range(len(variant_collection))] tumor_ref_depths = [randint(1, 10) for v in variant_collection] tumor_alt_depths = [randint(1, 10) for v in variant_collection] df["t1"] = ["0/1:%d,%d:%d" % (tumor_ref_depths[i], tumor_alt_depths[i], tumor_ref_depths[i] + tumor_alt_depths[i]) for i in range(len(variant_collection))] with open(filename, "w") as f: f.write("##fileformat=VCFv4.1\n") f.write("##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\n") with open(filename, "a") as f: df.to_csv(f, sep="\t", index=None, header=None)
[ "def", "generate_simple_vcf", "(", "filename", ",", "variant_collection", ")", ":", "contigs", "=", "[", "]", "positions", "=", "[", "]", "refs", "=", "[", "]", "alts", "=", "[", "]", "for", "variant", "in", "variant_collection", ":", "contigs", ".", "append", "(", "\"chr\"", "+", "variant", ".", "contig", ")", "positions", ".", "append", "(", "variant", ".", "start", ")", "refs", ".", "append", "(", "variant", ".", "ref", ")", "alts", ".", "append", "(", "variant", ".", "alt", ")", "df", "=", "pd", ".", "DataFrame", "(", ")", "df", "[", "\"contig\"", "]", "=", "contigs", "df", "[", "\"position\"", "]", "=", "positions", "df", "[", "\"id\"", "]", "=", "[", "\".\"", "]", "*", "len", "(", "variant_collection", ")", "df", "[", "\"ref\"", "]", "=", "refs", "df", "[", "\"alt\"", "]", "=", "alts", "df", "[", "\"qual\"", "]", "=", "[", "\".\"", "]", "*", "len", "(", "variant_collection", ")", "df", "[", "\"filter\"", "]", "=", "[", "\".\"", "]", "*", "len", "(", "variant_collection", ")", "df", "[", "\"info\"", "]", "=", "[", "\".\"", "]", "*", "len", "(", "variant_collection", ")", "df", "[", "\"format\"", "]", "=", "[", "\"GT:AD:DP\"", "]", "*", "len", "(", "variant_collection", ")", "normal_ref_depths", "=", "[", "randint", "(", "1", ",", "10", ")", "for", "v", "in", "variant_collection", "]", "normal_alt_depths", "=", "[", "randint", "(", "1", ",", "10", ")", "for", "v", "in", "variant_collection", "]", "df", "[", "\"n1\"", "]", "=", "[", "\"0:%d,%d:%d\"", "%", "(", "normal_ref_depths", "[", "i", "]", ",", "normal_alt_depths", "[", "i", "]", ",", "normal_ref_depths", "[", "i", "]", "+", "normal_alt_depths", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "variant_collection", ")", ")", "]", "tumor_ref_depths", "=", "[", "randint", "(", "1", ",", "10", ")", "for", "v", "in", "variant_collection", "]", "tumor_alt_depths", "=", "[", "randint", "(", "1", ",", "10", ")", "for", "v", "in", "variant_collection", "]", "df", "[", "\"t1\"", "]", "=", "[", "\"0/1:%d,%d:%d\"", "%", "(", "tumor_ref_depths", "[", "i", "]", ",", "tumor_alt_depths", "[", "i", "]", ",", "tumor_ref_depths", "[", "i", "]", "+", "tumor_alt_depths", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "variant_collection", ")", ")", "]", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"##fileformat=VCFv4.1\\n\"", ")", "f", ".", "write", "(", "\"##reference=file:///projects/ngs/resources/gatk/2.3/ucsc.hg19.fasta\\n\"", ")", "with", "open", "(", "filename", ",", "\"a\"", ")", "as", "f", ":", "df", ".", "to_csv", "(", "f", ",", "sep", "=", "\"\\t\"", ",", "index", "=", "None", ",", "header", "=", "None", ")" ]
Output a very simple metadata-free VCF for each variant in a variant_collection.
[ "Output", "a", "very", "simple", "metadata", "-", "free", "VCF", "for", "each", "variant", "in", "a", "variant_collection", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/random.py#L102-L140
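For reference, a pure-Python sketch of what one emitted row looks like, with hard-coded values instead of real varcode objects: two header lines, then tab-separated columns ending in GT:AD:DP fields for the n1 and t1 samples, in the same column order as the DataFrame built above.

```python
# Illustrative output format only; depths are made-up numbers.
row = "\t".join([
    "chr1", "12345", ".", "A", "T", ".", ".", ".", "GT:AD:DP",
    "0:4,6:10",    # n1: ref_depth=4, alt_depth=6, total=10
    "0/1:3,7:10",  # t1: ref_depth=3, alt_depth=7, total=10
])
print("##fileformat=VCFv4.1")
print(row)
```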
carletes/mock-ssh-server
mockssh/sftp.py
SFTPServerInterface.list_folder
def list_folder(self, path): """Looks up folder contents of `path.`""" # Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70 try: folder_contents = [] for f in os.listdir(path): attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, f))) attr.filename = f folder_contents.append(attr) return folder_contents except OSError as e: return SFTPServer.convert_errno(e.errno)
python
def list_folder(self, path): """Looks up folder contents of `path.`""" # Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70 try: folder_contents = [] for f in os.listdir(path): attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, f))) attr.filename = f folder_contents.append(attr) return folder_contents except OSError as e: return SFTPServer.convert_errno(e.errno)
[ "def", "list_folder", "(", "self", ",", "path", ")", ":", "# Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70", "try", ":", "folder_contents", "=", "[", "]", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", ":", "attr", "=", "paramiko", ".", "SFTPAttributes", ".", "from_stat", "(", "os", ".", "stat", "(", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", ")", ")", "attr", ".", "filename", "=", "f", "folder_contents", ".", "append", "(", "attr", ")", "return", "folder_contents", "except", "OSError", "as", "e", ":", "return", "SFTPServer", ".", "convert_errno", "(", "e", ".", "errno", ")" ]
Looks up folder contents of `path`.
[ "Looks", "up", "folder", "contents", "of", "path", "." ]
train
https://github.com/carletes/mock-ssh-server/blob/0d724ad4a43bafcb6a4bbe28b52383528f3460cc/mockssh/sftp.py#L142-L153
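The listing pattern above also works outside an SFTP session; the sketch below (requires paramiko) builds the same SFTPAttributes objects for a local directory, as an illustration rather than server code.

```python
# Standalone sketch of the directory-listing pattern: one SFTPAttributes per
# entry, built from os.stat, with the filename attached afterwards.
import os
import paramiko

def local_listing(path="."):
    entries = []
    for name in os.listdir(path):
        attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, name)))
        attr.filename = name
        entries.append(attr)
    return entries

for attr in local_listing("."):
    print(attr.filename, attr.st_size)
```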
hammerlab/cohorts
cohorts/varcode_utils.py
filter_variants
def filter_variants(variant_collection, patient, filter_fn, **kwargs): """Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter """ if filter_fn: return variant_collection.clone_with_new_elements([ variant for variant in variant_collection if filter_fn(FilterableVariant( variant=variant, variant_collection=variant_collection, patient=patient, ), **kwargs) ]) else: return variant_collection
python
def filter_variants(variant_collection, patient, filter_fn, **kwargs): """Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter """ if filter_fn: return variant_collection.clone_with_new_elements([ variant for variant in variant_collection if filter_fn(FilterableVariant( variant=variant, variant_collection=variant_collection, patient=patient, ), **kwargs) ]) else: return variant_collection
[ "def", "filter_variants", "(", "variant_collection", ",", "patient", ",", "filter_fn", ",", "*", "*", "kwargs", ")", ":", "if", "filter_fn", ":", "return", "variant_collection", ".", "clone_with_new_elements", "(", "[", "variant", "for", "variant", "in", "variant_collection", "if", "filter_fn", "(", "FilterableVariant", "(", "variant", "=", "variant", ",", "variant_collection", "=", "variant_collection", ",", "patient", "=", "patient", ",", ")", ",", "*", "*", "kwargs", ")", "]", ")", "else", ":", "return", "variant_collection" ]
Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter
[ "Filter", "variants", "from", "the", "Variant", "Collection" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/varcode_utils.py#L79-L105
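A hypothetical `filter_fn` for `filter_variants`; the FilterableVariant passed in wraps the underlying varcode variant, the collection and the patient, so variant fields such as `contig` are reachable through it. The call itself is left commented since it needs a real collection and patient.

```python
# Hypothetical filter function: keep only variants on chromosome 1.
def only_chr1(filterable_variant, **kwargs):
    return filterable_variant.variant.contig == "1"

# filtered = filter_variants(variant_collection, patient, filter_fn=only_chr1)
```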
hammerlab/cohorts
cohorts/varcode_utils.py
filter_effects
def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs): """Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter """ def top_priority_maybe(effect_collection): """ Always (unless all_effects=True) take the top priority effect per variant so we end up with a single effect per variant. """ if all_effects: return effect_collection return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values())) def apply_filter_fn(filter_fn, effect): """ Return True if filter_fn is true for the effect or its alternate_effect. If no alternate_effect, then just return True if filter_fn is True. """ applied = filter_fn(FilterableEffect( effect=effect, variant_collection=variant_collection, patient=patient), **kwargs) if hasattr(effect, "alternate_effect"): applied_alternate = filter_fn(FilterableEffect( effect=effect.alternate_effect, variant_collection=variant_collection, patient=patient), **kwargs) return applied or applied_alternate return applied if filter_fn: return top_priority_maybe(EffectCollection([ effect for effect in effect_collection if apply_filter_fn(filter_fn, effect)])) else: return top_priority_maybe(effect_collection)
python
def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs): """Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter """ def top_priority_maybe(effect_collection): """ Always (unless all_effects=True) take the top priority effect per variant so we end up with a single effect per variant. """ if all_effects: return effect_collection return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values())) def apply_filter_fn(filter_fn, effect): """ Return True if filter_fn is true for the effect or its alternate_effect. If no alternate_effect, then just return True if filter_fn is True. """ applied = filter_fn(FilterableEffect( effect=effect, variant_collection=variant_collection, patient=patient), **kwargs) if hasattr(effect, "alternate_effect"): applied_alternate = filter_fn(FilterableEffect( effect=effect.alternate_effect, variant_collection=variant_collection, patient=patient), **kwargs) return applied or applied_alternate return applied if filter_fn: return top_priority_maybe(EffectCollection([ effect for effect in effect_collection if apply_filter_fn(filter_fn, effect)])) else: return top_priority_maybe(effect_collection)
[ "def", "filter_effects", "(", "effect_collection", ",", "variant_collection", ",", "patient", ",", "filter_fn", ",", "all_effects", ",", "*", "*", "kwargs", ")", ":", "def", "top_priority_maybe", "(", "effect_collection", ")", ":", "\"\"\"\n Always (unless all_effects=True) take the top priority effect per variant\n so we end up with a single effect per variant.\n \"\"\"", "if", "all_effects", ":", "return", "effect_collection", "return", "EffectCollection", "(", "list", "(", "effect_collection", ".", "top_priority_effect_per_variant", "(", ")", ".", "values", "(", ")", ")", ")", "def", "apply_filter_fn", "(", "filter_fn", ",", "effect", ")", ":", "\"\"\"\n Return True if filter_fn is true for the effect or its alternate_effect.\n If no alternate_effect, then just return True if filter_fn is True.\n \"\"\"", "applied", "=", "filter_fn", "(", "FilterableEffect", "(", "effect", "=", "effect", ",", "variant_collection", "=", "variant_collection", ",", "patient", "=", "patient", ")", ",", "*", "*", "kwargs", ")", "if", "hasattr", "(", "effect", ",", "\"alternate_effect\"", ")", ":", "applied_alternate", "=", "filter_fn", "(", "FilterableEffect", "(", "effect", "=", "effect", ".", "alternate_effect", ",", "variant_collection", "=", "variant_collection", ",", "patient", "=", "patient", ")", ",", "*", "*", "kwargs", ")", "return", "applied", "or", "applied_alternate", "return", "applied", "if", "filter_fn", ":", "return", "top_priority_maybe", "(", "EffectCollection", "(", "[", "effect", "for", "effect", "in", "effect_collection", "if", "apply_filter_fn", "(", "filter_fn", ",", "effect", ")", "]", ")", ")", "else", ":", "return", "top_priority_maybe", "(", "effect_collection", ")" ]
Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter
[ "Filter", "variants", "from", "the", "Effect", "Collection" ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/varcode_utils.py#L107-L157
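A minimal usage sketch for filter_effects above. Everything except the FilterableEffect keyword arguments (effect, variant_collection, patient, taken from the record) is a hypothetical stand-in: gene_whitelist_filter, the whitelist, and the collections would normally come from real cohorts/varcode objects.

# Hypothetical filter: keep only effects whose gene is on a whitelist.
# gene_name is assumed to be available on the wrapped varcode effect.
def gene_whitelist_filter(filterable, whitelist=frozenset({"TP53", "KRAS"})):
    return getattr(filterable.effect, "gene_name", None) in whitelist

# filtered = filter_effects(
#     effect_collection=effects,        # varcode.EffectCollection
#     variant_collection=variants,      # varcode.VariantCollection
#     patient=patient,                  # cohorts.Patient
#     filter_fn=gene_whitelist_filter,
#     all_effects=False)                # one top-priority effect per variant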
garethr/django-timelog
src/timelog/lib.py
count_lines_in
def count_lines_in(filename):
    "Count lines in a file"
    f = open(filename)
    lines = 0
    buf_size = 1024 * 1024
    read_f = f.read # loop optimization

    buf = read_f(buf_size)
    while buf:
        lines += buf.count('\n')
        buf = read_f(buf_size)

    return lines
python
def count_lines_in(filename):
    "Count lines in a file"
    f = open(filename)
    lines = 0
    buf_size = 1024 * 1024
    read_f = f.read # loop optimization

    buf = read_f(buf_size)
    while buf:
        lines += buf.count('\n')
        buf = read_f(buf_size)

    return lines
[ "def", "count_lines_in", "(", "filename", ")", ":", "f", "=", "open", "(", "filename", ")", "lines", "=", "0", "buf_size", "=", "1024", "*", "1024", "read_f", "=", "f", ".", "read", "# loop optimization", "buf", "=", "read_f", "(", "buf_size", ")", "while", "buf", ":", "lines", "+=", "buf", ".", "count", "(", "'\\n'", ")", "buf", "=", "read_f", "(", "buf_size", ")", "return", "lines" ]
Count lines in a file
[ "Count", "lines", "in", "a", "file" ]
train
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L16-L28
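The chunked counting in count_lines_in can be tried standalone; this sketch builds a throwaway file first so it runs anywhere, and uses a with-block so the handle is closed (the original relies on garbage collection).

import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "timelog_example.log")
with open(path, "w") as fh:
    fh.write("line one\nline two\nline three\n")

# Same technique as count_lines_in: read fixed-size chunks and count '\n'.
lines = 0
with open(path) as fh:
    buf = fh.read(1024 * 1024)
    while buf:
        lines += buf.count("\n")
        buf = fh.read(1024 * 1024)
print(lines)  # -> 3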
garethr/django-timelog
src/timelog/lib.py
view_name_from
def view_name_from(path): "Resolve a path to the full python module name of the related view function" try: return CACHED_VIEWS[path] except KeyError: view = resolve(path) module = path name = '' if hasattr(view.func, '__module__'): module = resolve(path).func.__module__ if hasattr(view.func, '__name__'): name = resolve(path).func.__name__ view = "%s.%s" % (module, name) CACHED_VIEWS[path] = view return view
python
def view_name_from(path): "Resolve a path to the full python module name of the related view function" try: return CACHED_VIEWS[path] except KeyError: view = resolve(path) module = path name = '' if hasattr(view.func, '__module__'): module = resolve(path).func.__module__ if hasattr(view.func, '__name__'): name = resolve(path).func.__name__ view = "%s.%s" % (module, name) CACHED_VIEWS[path] = view return view
[ "def", "view_name_from", "(", "path", ")", ":", "try", ":", "return", "CACHED_VIEWS", "[", "path", "]", "except", "KeyError", ":", "view", "=", "resolve", "(", "path", ")", "module", "=", "path", "name", "=", "''", "if", "hasattr", "(", "view", ".", "func", ",", "'__module__'", ")", ":", "module", "=", "resolve", "(", "path", ")", ".", "func", ".", "__module__", "if", "hasattr", "(", "view", ".", "func", ",", "'__name__'", ")", ":", "name", "=", "resolve", "(", "path", ")", ".", "func", ".", "__name__", "view", "=", "\"%s.%s\"", "%", "(", "module", ",", "name", ")", "CACHED_VIEWS", "[", "path", "]", "=", "view", "return", "view" ]
Resolve a path to the full python module name of the related view function
[ "Resolve", "a", "path", "to", "the", "full", "python", "module", "name", "of", "the", "related", "view", "function" ]
train
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L30-L46
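view_name_from needs a configured Django URLconf, so it is awkward to run in isolation. The sketch below shows only the memoisation pattern it relies on (a module-level dict keyed by path); fake_resolve is a stand-in invented here, not Django's resolve.

CACHED = {}

def fake_resolve(path):
    # Stand-in for django.urls.resolve: pretend every path maps to one view.
    print("resolving", path)
    return "myapp.views.index"

def cached_view_name(path):
    try:
        return CACHED[path]
    except KeyError:
        CACHED[path] = fake_resolve(path)
        return CACHED[path]

cached_view_name("/home/")  # prints "resolving /home/"
cached_view_name("/home/")  # answered from CACHED, no second resolve call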
garethr/django-timelog
src/timelog/lib.py
generate_table_from
def generate_table_from(data): "Output a nicely formatted ascii table" table = Texttable(max_width=120) table.add_row(["view", "method", "status", "count", "minimum", "maximum", "mean", "stdev", "queries", "querytime"]) table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"]) for item in sorted(data): mean = round(sum(data[item]['times'])/data[item]['count'], 3) mean_sql = round(sum(data[item]['sql'])/data[item]['count'], 3) mean_sqltime = round(sum(data[item]['sqltime'])/data[item]['count'], 3) sdsq = sum([(i - mean) ** 2 for i in data[item]['times']]) try: stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5) except ZeroDivisionError: stdev = '0.00' minimum = "%.2f" % min(data[item]['times']) maximum = "%.2f" % max(data[item]['times']) table.add_row([data[item]['view'], data[item]['method'], data[item]['status'], data[item]['count'], minimum, maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime]) return table.draw()
python
def generate_table_from(data): "Output a nicely formatted ascii table" table = Texttable(max_width=120) table.add_row(["view", "method", "status", "count", "minimum", "maximum", "mean", "stdev", "queries", "querytime"]) table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"]) for item in sorted(data): mean = round(sum(data[item]['times'])/data[item]['count'], 3) mean_sql = round(sum(data[item]['sql'])/data[item]['count'], 3) mean_sqltime = round(sum(data[item]['sqltime'])/data[item]['count'], 3) sdsq = sum([(i - mean) ** 2 for i in data[item]['times']]) try: stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5) except ZeroDivisionError: stdev = '0.00' minimum = "%.2f" % min(data[item]['times']) maximum = "%.2f" % max(data[item]['times']) table.add_row([data[item]['view'], data[item]['method'], data[item]['status'], data[item]['count'], minimum, maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime]) return table.draw()
[ "def", "generate_table_from", "(", "data", ")", ":", "table", "=", "Texttable", "(", "max_width", "=", "120", ")", "table", ".", "add_row", "(", "[", "\"view\"", ",", "\"method\"", ",", "\"status\"", ",", "\"count\"", ",", "\"minimum\"", ",", "\"maximum\"", ",", "\"mean\"", ",", "\"stdev\"", ",", "\"queries\"", ",", "\"querytime\"", "]", ")", "table", ".", "set_cols_align", "(", "[", "\"l\"", ",", "\"l\"", ",", "\"l\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", "]", ")", "for", "item", "in", "sorted", "(", "data", ")", ":", "mean", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "mean_sql", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'sql'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "mean_sqltime", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'sqltime'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "sdsq", "=", "sum", "(", "[", "(", "i", "-", "mean", ")", "**", "2", "for", "i", "in", "data", "[", "item", "]", "[", "'times'", "]", "]", ")", "try", ":", "stdev", "=", "'%.2f'", "%", "(", "(", "sdsq", "/", "(", "len", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "-", "1", ")", ")", "**", ".5", ")", "except", "ZeroDivisionError", ":", "stdev", "=", "'0.00'", "minimum", "=", "\"%.2f\"", "%", "min", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "maximum", "=", "\"%.2f\"", "%", "max", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "table", ".", "add_row", "(", "[", "data", "[", "item", "]", "[", "'view'", "]", ",", "data", "[", "item", "]", "[", "'method'", "]", ",", "data", "[", "item", "]", "[", "'status'", "]", ",", "data", "[", "item", "]", "[", "'count'", "]", ",", "minimum", ",", "maximum", ",", "'%.3f'", "%", "mean", ",", "stdev", ",", "mean_sql", ",", "mean_sqltime", "]", ")", "return", "table", ".", "draw", "(", ")" ]
Output a nicely formatted ascii table
[ "Output", "a", "nicely", "formatted", "ascii", "table" ]
train
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L48-L70
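generate_table_from expects the per-view dict that analyze_log_file builds; a minimal hand-made input is sketched below (the view name and numbers are invented). Calling the function additionally assumes the texttable package is installed and timelog.lib is importable.

# Shape inferred from the function body: one entry per view/status/method key.
data = {
    "myapp.views.index-200-GET": {
        "view": "myapp.views.index",
        "method": "GET",
        "status": "200",
        "count": 3,
        "times": [0.12, 0.34, 0.20],
        "sql": [4, 6, 5],
        "sqltime": [0.01, 0.03, 0.02],
    },
}
# print(generate_table_from(data))  # ascii table with min/max/mean/stdev columns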
garethr/django-timelog
src/timelog/lib.py
analyze_log_file
def analyze_log_file(logfile, pattern, reverse_paths=True, progress=True): "Given a log file and regex group and extract the performance data" if progress: lines = count_lines_in(logfile) pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=lines+1).start() counter = 0 data = {} compiled_pattern = compile(pattern) for line in fileinput.input([logfile]): if progress: counter = counter + 1 parsed = compiled_pattern.findall(line)[0] date = parsed[0] method = parsed[1] path = parsed[2] status = parsed[3] time = parsed[4] sql = parsed[5] sqltime = parsed[6] try: ignore = False for ignored_path in IGNORE_PATHS: compiled_path = compile(ignored_path) if compiled_path.match(path): ignore = True if not ignore: if reverse_paths: view = view_name_from(path) else: view = path key = "%s-%s-%s" % (view, status, method) try: data[key]['count'] = data[key]['count'] + 1 data[key]['times'].append(float(time)) data[key]['sql'].append(int(sql)) data[key]['sqltime'].append(float(sqltime)) except KeyError: data[key] = { 'count': 1, 'status': status, 'view': view, 'method': method, 'times': [float(time)], 'sql': [int(sql)], 'sqltime': [float(sqltime)], } except Resolver404: pass if progress: pbar.update(counter) if progress: pbar.finish() return data
python
def analyze_log_file(logfile, pattern, reverse_paths=True, progress=True): "Given a log file and regex group and extract the performance data" if progress: lines = count_lines_in(logfile) pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=lines+1).start() counter = 0 data = {} compiled_pattern = compile(pattern) for line in fileinput.input([logfile]): if progress: counter = counter + 1 parsed = compiled_pattern.findall(line)[0] date = parsed[0] method = parsed[1] path = parsed[2] status = parsed[3] time = parsed[4] sql = parsed[5] sqltime = parsed[6] try: ignore = False for ignored_path in IGNORE_PATHS: compiled_path = compile(ignored_path) if compiled_path.match(path): ignore = True if not ignore: if reverse_paths: view = view_name_from(path) else: view = path key = "%s-%s-%s" % (view, status, method) try: data[key]['count'] = data[key]['count'] + 1 data[key]['times'].append(float(time)) data[key]['sql'].append(int(sql)) data[key]['sqltime'].append(float(sqltime)) except KeyError: data[key] = { 'count': 1, 'status': status, 'view': view, 'method': method, 'times': [float(time)], 'sql': [int(sql)], 'sqltime': [float(sqltime)], } except Resolver404: pass if progress: pbar.update(counter) if progress: pbar.finish() return data
[ "def", "analyze_log_file", "(", "logfile", ",", "pattern", ",", "reverse_paths", "=", "True", ",", "progress", "=", "True", ")", ":", "if", "progress", ":", "lines", "=", "count_lines_in", "(", "logfile", ")", "pbar", "=", "ProgressBar", "(", "widgets", "=", "[", "Percentage", "(", ")", ",", "Bar", "(", ")", "]", ",", "maxval", "=", "lines", "+", "1", ")", ".", "start", "(", ")", "counter", "=", "0", "data", "=", "{", "}", "compiled_pattern", "=", "compile", "(", "pattern", ")", "for", "line", "in", "fileinput", ".", "input", "(", "[", "logfile", "]", ")", ":", "if", "progress", ":", "counter", "=", "counter", "+", "1", "parsed", "=", "compiled_pattern", ".", "findall", "(", "line", ")", "[", "0", "]", "date", "=", "parsed", "[", "0", "]", "method", "=", "parsed", "[", "1", "]", "path", "=", "parsed", "[", "2", "]", "status", "=", "parsed", "[", "3", "]", "time", "=", "parsed", "[", "4", "]", "sql", "=", "parsed", "[", "5", "]", "sqltime", "=", "parsed", "[", "6", "]", "try", ":", "ignore", "=", "False", "for", "ignored_path", "in", "IGNORE_PATHS", ":", "compiled_path", "=", "compile", "(", "ignored_path", ")", "if", "compiled_path", ".", "match", "(", "path", ")", ":", "ignore", "=", "True", "if", "not", "ignore", ":", "if", "reverse_paths", ":", "view", "=", "view_name_from", "(", "path", ")", "else", ":", "view", "=", "path", "key", "=", "\"%s-%s-%s\"", "%", "(", "view", ",", "status", ",", "method", ")", "try", ":", "data", "[", "key", "]", "[", "'count'", "]", "=", "data", "[", "key", "]", "[", "'count'", "]", "+", "1", "data", "[", "key", "]", "[", "'times'", "]", ".", "append", "(", "float", "(", "time", ")", ")", "data", "[", "key", "]", "[", "'sql'", "]", ".", "append", "(", "int", "(", "sql", ")", ")", "data", "[", "key", "]", "[", "'sqltime'", "]", ".", "append", "(", "float", "(", "sqltime", ")", ")", "except", "KeyError", ":", "data", "[", "key", "]", "=", "{", "'count'", ":", "1", ",", "'status'", ":", "status", ",", "'view'", ":", "view", ",", "'method'", ":", "method", ",", "'times'", ":", "[", "float", "(", "time", ")", "]", ",", "'sql'", ":", "[", "int", "(", "sql", ")", "]", ",", "'sqltime'", ":", "[", "float", "(", "sqltime", ")", "]", ",", "}", "except", "Resolver404", ":", "pass", "if", "progress", ":", "pbar", ".", "update", "(", "counter", ")", "if", "progress", ":", "pbar", ".", "finish", "(", ")", "return", "data" ]
Given a log file and regex group and extract the performance data
[ "Given", "a", "log", "file", "and", "regex", "group", "and", "extract", "the", "performance", "data" ]
train
https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L72-L132
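analyze_log_file unpacks seven regex groups per line (date, method, path, status, time, query count, query time), so any pattern passed in must capture exactly those. The pattern and sample line below are illustrative assumptions, not the exact format django-timelog writes.

import re

# Hypothetical layout: timestamp "METHOD path" status time sql_count sql_time
PATTERN = r'^(\S+ \S+) "(\w+) (\S+)" (\d{3}) ([\d.]+) (\d+) ([\d.]+)$'
line = '2012-01-01 12:00:00 "GET /home/" 200 0.134 5 0.022'

date, method, path, status, time_taken, sql, sqltime = \
    re.compile(PATTERN).findall(line)[0]
print(method, path, status, time_taken, sql, sqltime)

# analyze_log_file("timelog.log", PATTERN, reverse_paths=False) would then
# aggregate such lines per raw path instead of per resolved view name.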
hammerlab/cohorts
cohorts/collection.py
Collection.to_string
def to_string(self, limit=None): """ Create a string representation of this collection, showing up to `limit` items. """ header = self.short_string() if len(self) == 0: return header contents = "" element_lines = [ " -- %s" % (element,) for element in self.elements[:limit] ] contents = "\n".join(element_lines) if limit is not None and len(self.elements) > limit: contents += "\n ... and %d more" % (len(self) - limit) return "%s\n%s" % (header, contents)
python
def to_string(self, limit=None): """ Create a string representation of this collection, showing up to `limit` items. """ header = self.short_string() if len(self) == 0: return header contents = "" element_lines = [ " -- %s" % (element,) for element in self.elements[:limit] ] contents = "\n".join(element_lines) if limit is not None and len(self.elements) > limit: contents += "\n ... and %d more" % (len(self) - limit) return "%s\n%s" % (header, contents)
[ "def", "to_string", "(", "self", ",", "limit", "=", "None", ")", ":", "header", "=", "self", ".", "short_string", "(", ")", "if", "len", "(", "self", ")", "==", "0", ":", "return", "header", "contents", "=", "\"\"", "element_lines", "=", "[", "\" -- %s\"", "%", "(", "element", ",", ")", "for", "element", "in", "self", ".", "elements", "[", ":", "limit", "]", "]", "contents", "=", "\"\\n\"", ".", "join", "(", "element_lines", ")", "if", "limit", "is", "not", "None", "and", "len", "(", "self", ".", "elements", ")", ">", "limit", ":", "contents", "+=", "\"\\n ... and %d more\"", "%", "(", "len", "(", "self", ")", "-", "limit", ")", "return", "\"%s\\n%s\"", "%", "(", "header", ",", "contents", ")" ]
Create a string representation of this collection, showing up to `limit` items.
[ "Create", "a", "string", "representation", "of", "this", "collection", "showing", "up", "to", "limit", "items", "." ]
train
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/collection.py#L29-L46
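The truncation logic in to_string is easy to mimic without the cohorts Collection class; this standalone sketch reproduces the " -- item" listing and the "... and N more" tail.

def list_with_limit(header, elements, limit=None):
    # Same idea as Collection.to_string: show up to `limit` items, then a count.
    if not elements:
        return header
    body = "\n".join(" -- %s" % (e,) for e in elements[:limit])
    if limit is not None and len(elements) > limit:
        body += "\n ... and %d more" % (len(elements) - limit)
    return "%s\n%s" % (header, body)

print(list_with_limit("Variants: 5 items", ["chr1 g.100A>T"] * 5, limit=2))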
alvarogzp/telegram-bot-framework
bot/action/standard/userinfo.py
UserStorageHandler.get_instance
def get_instance(cls, state): """:rtype: UserStorageHandler""" if cls.instance is None: cls.instance = UserStorageHandler(state) return cls.instance
python
def get_instance(cls, state): """:rtype: UserStorageHandler""" if cls.instance is None: cls.instance = UserStorageHandler(state) return cls.instance
[ "def", "get_instance", "(", "cls", ",", "state", ")", ":", "if", "cls", ".", "instance", "is", "None", ":", "cls", ".", "instance", "=", "UserStorageHandler", "(", "state", ")", "return", "cls", ".", "instance" ]
:rtype: UserStorageHandler
[ ":", "rtype", ":", "UserStorageHandler" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/userinfo.py#L26-L30
alvarogzp/telegram-bot-framework
bot/action/standard/benchmark.py
WorkersAction._get_active_threads_names
def _get_active_threads_names(): """May contain sensitive info (like user ids). Use with care.""" active_threads = threading.enumerate() return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format() for thread in active_threads ] )
python
def _get_active_threads_names(): """May contain sensitive info (like user ids). Use with care.""" active_threads = threading.enumerate() return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=thread.name).end_format() for thread in active_threads ] )
[ "def", "_get_active_threads_names", "(", ")", ":", "active_threads", "=", "threading", ".", "enumerate", "(", ")", "return", "FormattedText", "(", ")", ".", "join", "(", "[", "FormattedText", "(", ")", ".", "newline", "(", ")", ".", "normal", "(", "\" - {name}\"", ")", ".", "start_format", "(", ")", ".", "bold", "(", "name", "=", "thread", ".", "name", ")", ".", "end_format", "(", ")", "for", "thread", "in", "active_threads", "]", ")" ]
May contain sensitive info (like user ids). Use with care.
[ "May", "contain", "sensitive", "info", "(", "like", "user", "ids", ")", ".", "Use", "with", "care", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L164-L172
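A plain-Python equivalent of the thread listing above, without the framework's FormattedText builder (simple string concatenation stands in for the bold/normal formatting).

import threading

def active_thread_names():
    # threading.enumerate() returns all live Thread objects, as in the action above.
    return "".join("\n - %s" % t.name for t in threading.enumerate())

print(active_thread_names())  # e.g. "- MainThread"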
alvarogzp/telegram-bot-framework
bot/action/standard/benchmark.py
WorkersAction._get_running_workers_names
def _get_running_workers_names(running_workers: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in running_workers ] )
python
def _get_running_workers_names(running_workers: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in running_workers ] )
[ "def", "_get_running_workers_names", "(", "running_workers", ":", "list", ")", ":", "return", "FormattedText", "(", ")", ".", "join", "(", "[", "FormattedText", "(", ")", ".", "newline", "(", ")", ".", "normal", "(", "\" - {name}\"", ")", ".", "start_format", "(", ")", ".", "bold", "(", "name", "=", "worker", ".", "name", ")", ".", "end_format", "(", ")", "for", "worker", "in", "running_workers", "]", ")" ]
May contain sensitive info (like user ids). Use with care.
[ "May", "contain", "sensitive", "info", "(", "like", "user", "ids", ")", ".", "Use", "with", "care", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L187-L194
alvarogzp/telegram-bot-framework
bot/action/standard/benchmark.py
WorkersAction._get_worker_pools_names
def _get_worker_pools_names(worker_pools: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in worker_pools ] )
python
def _get_worker_pools_names(worker_pools: list): """May contain sensitive info (like user ids). Use with care.""" return FormattedText().join( [ FormattedText().newline().normal(" - {name}").start_format().bold(name=worker.name).end_format() for worker in worker_pools ] )
[ "def", "_get_worker_pools_names", "(", "worker_pools", ":", "list", ")", ":", "return", "FormattedText", "(", ")", ".", "join", "(", "[", "FormattedText", "(", ")", ".", "newline", "(", ")", ".", "normal", "(", "\" - {name}\"", ")", ".", "start_format", "(", ")", ".", "bold", "(", "name", "=", "worker", ".", "name", ")", ".", "end_format", "(", ")", "for", "worker", "in", "worker_pools", "]", ")" ]
May contain sensitive info (like user ids). Use with care.
[ "May", "contain", "sensitive", "info", "(", "like", "user", "ids", ")", ".", "Use", "with", "care", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/benchmark.py#L209-L216
alvarogzp/telegram-bot-framework
bot/action/standard/info/formatter/user.py
UserInfoFormatter.format
def format(self, member_info: bool = False): """ :param member_info: If True, adds also chat member info. Please, note that this additional info requires to make ONE api call. """ user = self.api_object self.__format_user(user) if member_info and self.chat.type != CHAT_TYPE_PRIVATE: self._add_empty() self.__format_member(user)
python
def format(self, member_info: bool = False): """ :param member_info: If True, adds also chat member info. Please, note that this additional info requires to make ONE api call. """ user = self.api_object self.__format_user(user) if member_info and self.chat.type != CHAT_TYPE_PRIVATE: self._add_empty() self.__format_member(user)
[ "def", "format", "(", "self", ",", "member_info", ":", "bool", "=", "False", ")", ":", "user", "=", "self", ".", "api_object", "self", ".", "__format_user", "(", "user", ")", "if", "member_info", "and", "self", ".", "chat", ".", "type", "!=", "CHAT_TYPE_PRIVATE", ":", "self", ".", "_add_empty", "(", ")", "self", ".", "__format_member", "(", "user", ")" ]
:param member_info: If True, adds also chat member info. Please, note that this additional info requires to make ONE api call.
[ ":", "param", "member_info", ":", "If", "True", "adds", "also", "chat", "member", "info", ".", "Please", "note", "that", "this", "additional", "info", "requires", "to", "make", "ONE", "api", "call", "." ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/user.py#L19-L28
alvarogzp/telegram-bot-framework
bot/bot.py
UpdatesProcessor.safe_log_error
def safe_log_error(self, error: Exception, *info: str): """Log error failing silently on error""" self.__do_safe(lambda: self.logger.error(error, *info))
python
def safe_log_error(self, error: Exception, *info: str): """Log error failing silently on error""" self.__do_safe(lambda: self.logger.error(error, *info))
[ "def", "safe_log_error", "(", "self", ",", "error", ":", "Exception", ",", "*", "info", ":", "str", ")", ":", "self", ".", "__do_safe", "(", "lambda", ":", "self", ".", "logger", ".", "error", "(", "error", ",", "*", "info", ")", ")" ]
Log error failing silently on error
[ "Log", "error", "failing", "silently", "on", "error" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/bot.py#L173-L175
alvarogzp/telegram-bot-framework
bot/bot.py
UpdatesProcessor.safe_log_info
def safe_log_info(self, *info: str): """Log info failing silently on error""" self.__do_safe(lambda: self.logger.info(*info))
python
def safe_log_info(self, *info: str): """Log info failing silently on error""" self.__do_safe(lambda: self.logger.info(*info))
[ "def", "safe_log_info", "(", "self", ",", "*", "info", ":", "str", ")", ":", "self", ".", "__do_safe", "(", "lambda", ":", "self", ".", "logger", ".", "info", "(", "*", "info", ")", ")" ]
Log info failing silently on error
[ "Log", "info", "failing", "silently", "on", "error" ]
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/bot.py#L177-L179
brentp/skidmarks
skidmarks.py
wald_wolfowitz
def wald_wolfowitz(sequence):
    """
    implements the wald-wolfowitz runs test:
    http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
    http://support.sas.com/kb/33/092.html

    :param sequence: any iterable with at most 2 values. e.g.
        '1001001'
        [1, 0, 1, 0, 1]
        'abaaabbba'

    :rtype: a dict with keys of
        `n_runs`: the number of runs in the sequence
        `p`: the support to reject the null-hypothesis that the number of runs
             supports a random sequence
        `z`: the z-score, used to calculate the p-value
        `sd`, `mean`: the expected standard deviation, mean the number of runs,
                      given the ratio of numbers of 1's/0's in the sequence

    >>> r = wald_wolfowitz('1000001')
    >>> r['n_runs'] # should be 3, because 1, 0, 1
    3

    >>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
    False

    # this should show significance for non-randomness
    >>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    >>> wald_wolfowitz(li)['p'] < 0.05
    True
    """
    R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))

    n = float(sum(1 for s in sequence if s == sequence[0]))
    m = float(sum(1 for s in sequence if s != sequence[0]))

    # expected mean runs
    ER = ((2 * n * m ) / (n + m)) + 1
    # expected variance runs
    VR = (2 * n * m * (2 * n * m - n - m )) / ((n + m)**2 * (n + m - 1))
    O = (ER - 1) * (ER - 2) / (n + m - 1.)
    assert VR - O < 0.001, (VR, O)
    SD = math.sqrt(VR)
    # Z-score
    Z = (R - ER) / SD

    return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
python
def wald_wolfowitz(sequence):
    """
    implements the wald-wolfowitz runs test:
    http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
    http://support.sas.com/kb/33/092.html

    :param sequence: any iterable with at most 2 values. e.g.
        '1001001'
        [1, 0, 1, 0, 1]
        'abaaabbba'

    :rtype: a dict with keys of
        `n_runs`: the number of runs in the sequence
        `p`: the support to reject the null-hypothesis that the number of runs
             supports a random sequence
        `z`: the z-score, used to calculate the p-value
        `sd`, `mean`: the expected standard deviation, mean the number of runs,
                      given the ratio of numbers of 1's/0's in the sequence

    >>> r = wald_wolfowitz('1000001')
    >>> r['n_runs'] # should be 3, because 1, 0, 1
    3

    >>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
    False

    # this should show significance for non-randomness
    >>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    >>> wald_wolfowitz(li)['p'] < 0.05
    True
    """
    R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))

    n = float(sum(1 for s in sequence if s == sequence[0]))
    m = float(sum(1 for s in sequence if s != sequence[0]))

    # expected mean runs
    ER = ((2 * n * m ) / (n + m)) + 1
    # expected variance runs
    VR = (2 * n * m * (2 * n * m - n - m )) / ((n + m)**2 * (n + m - 1))
    O = (ER - 1) * (ER - 2) / (n + m - 1.)
    assert VR - O < 0.001, (VR, O)
    SD = math.sqrt(VR)
    # Z-score
    Z = (R - ER) / SD

    return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
[ "def", "wald_wolfowitz", "(", "sequence", ")", ":", "R", "=", "n_runs", "=", "sum", "(", "1", "for", "s", "in", "groupby", "(", "sequence", ",", "lambda", "a", ":", "a", ")", ")", "n", "=", "float", "(", "sum", "(", "1", "for", "s", "in", "sequence", "if", "s", "==", "sequence", "[", "0", "]", ")", ")", "m", "=", "float", "(", "sum", "(", "1", "for", "s", "in", "sequence", "if", "s", "!=", "sequence", "[", "0", "]", ")", ")", "# expected mean runs", "ER", "=", "(", "(", "2", "*", "n", "*", "m", ")", "/", "(", "n", "+", "m", ")", ")", "+", "1", "# expected variance runs", "VR", "=", "(", "2", "*", "n", "*", "m", "*", "(", "2", "*", "n", "*", "m", "-", "n", "-", "m", ")", ")", "/", "(", "(", "n", "+", "m", ")", "**", "2", "*", "(", "n", "+", "m", "-", "1", ")", ")", "O", "=", "(", "ER", "-", "1", ")", "*", "(", "ER", "-", "2", ")", "/", "(", "n", "+", "m", "-", "1.", ")", "assert", "VR", "-", "O", "<", "0.001", ",", "(", "VR", ",", "O", ")", "SD", "=", "math", ".", "sqrt", "(", "VR", ")", "# Z-score", "Z", "=", "(", "R", "-", "ER", ")", "/", "SD", "return", "{", "'z'", ":", "Z", ",", "'mean'", ":", "ER", ",", "'sd'", ":", "SD", ",", "'p'", ":", "zprob", "(", "Z", ")", ",", "'n_runs'", ":", "R", "}" ]
implements the wald-wolfowitz runs test: http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test http://support.sas.com/kb/33/092.html :param sequence: any iterable with at most 2 values. e.g. '1001001' [1, 0, 1, 0, 1] 'abaaabbba' :rtype: a dict with keys of `n_runs`: the number of runs in the sequence `p`: the support to reject the null-hypothesis that the number of runs supports a random sequence `z`: the z-score, used to calculate the p-value `sd`, `mean`: the expected standard deviation, mean the number of runs, given the ratio of numbers of 1's/0's in the sequence >>> r = wald_wolfowitz('1000001') >>> r['n_runs'] # should be 3, because 1, 0, 1 3 >>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence False # this should show significance for non-randomness >>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] >>> wald_wolfowitz(li)['p'] < 0.05 True
[ "implements", "the", "wald", "-", "wolfowitz", "runs", "test", ":", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Wald", "-", "Wolfowitz_runs_test", "http", ":", "//", "support", ".", "sas", ".", "com", "/", "kb", "/", "33", "/", "092", ".", "html" ]
train
https://github.com/brentp/skidmarks/blob/f63b9f1b822cb47991215b655155b5041e86ea39/skidmarks.py#L51-L99
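A worked check of the statistics for the docstring example '1000001', done with plain arithmetic so no skidmarks import is needed: with n = 2 ones and m = 5 zeros the expected number of runs is ER = 2nm/(n+m) + 1, about 3.86, close to the observed R = 3, so the small |Z| gives no evidence against randomness.

import math
from itertools import groupby

seq = '1000001'
R = sum(1 for _ in groupby(seq))              # observed runs: 1 / 00000 / 1 -> 3
n = float(seq.count(seq[0]))                  # 2 ones
m = float(len(seq) - n)                       # 5 zeros
ER = (2 * n * m) / (n + m) + 1                # ~3.857
VR = (2 * n * m * (2 * n * m - n - m)) / ((n + m) ** 2 * (n + m - 1))
Z = (R - ER) / math.sqrt(VR)                  # ~-0.91, well inside +/-1.96
print(R, round(ER, 3), round(Z, 3))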
brentp/skidmarks
skidmarks.py
auto_correlation
def auto_correlation(sequence): """ test for the autocorrelation of a sequence between t and t - 1 as the 'auto_correlation' it is less likely that the sequence is generated randomly. :param sequence: any iterable with at most 2 values that can be turned into a float via np.float . e.g. '1001001' [1, 0, 1, 0, 1] [1.2,.1,.5,1] :rtype: returns a dict of the linear regression stats of sequence[1:] vs. sequence[:-1] >>> result = auto_correlation('00000001111111111100000000') >>> result['p'] < 0.05 True >>> result['auto_correlation'] 0.83766233766233755 """ if isinstance(sequence, basestring): sequence = map(int, sequence) seq = np.array(list(sequence), dtype=np.float) dseq = np.column_stack((seq[1:], seq[:-1])) slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1]) cc = np.corrcoef(dseq, rowvar=0)[0][1] return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2, 'p': ttp, 'see': see, 'auto_correlation': cc}
python
def auto_correlation(sequence): """ test for the autocorrelation of a sequence between t and t - 1 as the 'auto_correlation' it is less likely that the sequence is generated randomly. :param sequence: any iterable with at most 2 values that can be turned into a float via np.float . e.g. '1001001' [1, 0, 1, 0, 1] [1.2,.1,.5,1] :rtype: returns a dict of the linear regression stats of sequence[1:] vs. sequence[:-1] >>> result = auto_correlation('00000001111111111100000000') >>> result['p'] < 0.05 True >>> result['auto_correlation'] 0.83766233766233755 """ if isinstance(sequence, basestring): sequence = map(int, sequence) seq = np.array(list(sequence), dtype=np.float) dseq = np.column_stack((seq[1:], seq[:-1])) slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1]) cc = np.corrcoef(dseq, rowvar=0)[0][1] return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2, 'p': ttp, 'see': see, 'auto_correlation': cc}
[ "def", "auto_correlation", "(", "sequence", ")", ":", "if", "isinstance", "(", "sequence", ",", "basestring", ")", ":", "sequence", "=", "map", "(", "int", ",", "sequence", ")", "seq", "=", "np", ".", "array", "(", "list", "(", "sequence", ")", ",", "dtype", "=", "np", ".", "float", ")", "dseq", "=", "np", ".", "column_stack", "(", "(", "seq", "[", "1", ":", "]", ",", "seq", "[", ":", "-", "1", "]", ")", ")", "slope", ",", "intercept", ",", "r", ",", "ttp", ",", "see", "=", "linregress", "(", "seq", "[", "1", ":", "]", ",", "seq", "[", ":", "-", "1", "]", ")", "cc", "=", "np", ".", "corrcoef", "(", "dseq", ",", "rowvar", "=", "0", ")", "[", "0", "]", "[", "1", "]", "return", "{", "'slope'", ":", "slope", ",", "'intercept'", ":", "intercept", ",", "'r-squared'", ":", "r", "**", "2", ",", "'p'", ":", "ttp", ",", "'see'", ":", "see", ",", "'auto_correlation'", ":", "cc", "}" ]
test for the autocorrelation of a sequence between t and t - 1 as the 'auto_correlation' it is less likely that the sequence is generated randomly. :param sequence: any iterable with at most 2 values that can be turned into a float via np.float . e.g. '1001001' [1, 0, 1, 0, 1] [1.2,.1,.5,1] :rtype: returns a dict of the linear regression stats of sequence[1:] vs. sequence[:-1] >>> result = auto_correlation('00000001111111111100000000') >>> result['p'] < 0.05 True >>> result['auto_correlation'] 0.83766233766233755
[ "test", "for", "the", "autocorrelation", "of", "a", "sequence", "between", "t", "and", "t", "-", "1", "as", "the", "auto_correlation", "it", "is", "less", "likely", "that", "the", "sequence", "is", "generated", "randomly", ".", ":", "param", "sequence", ":", "any", "iterable", "with", "at", "most", "2", "values", "that", "can", "be", "turned", "into", "a", "float", "via", "np", ".", "float", ".", "e", ".", "g", ".", "1001001", "[", "1", "0", "1", "0", "1", "]", "[", "1", ".", "2", ".", "1", ".", "5", "1", "]", ":", "rtype", ":", "returns", "a", "dict", "of", "the", "linear", "regression", "stats", "of", "sequence", "[", "1", ":", "]", "vs", ".", "sequence", "[", ":", "-", "1", "]" ]
train
https://github.com/brentp/skidmarks/blob/f63b9f1b822cb47991215b655155b5041e86ea39/skidmarks.py#L102-L129
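Usage mirrors the docstring (numpy and scipy are assumed installed; under Python 3 pass a list of ints, since the basestring/np.float spellings in the record are Python 2 era). The lag-1 correlation it reports can also be computed directly:

import numpy as np

seq = np.array(list(map(float, '00000001111111111100000000')))
cc = np.corrcoef(seq[1:], seq[:-1])[0][1]  # what result['auto_correlation'] returns
print(round(cc, 3))                        # ~0.838, matching the docstring value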
twisted/txacme
src/txacme/client.py
_parse_header_links
def _parse_header_links(response): """ Parse the links from a Link: header field. .. todo:: Links with the same relation collide at the moment. :param bytes value: The header value. :rtype: `dict` :return: A dictionary of parsed links, keyed by ``rel`` or ``url``. """ values = response.headers.getRawHeaders(b'link', [b'']) value = b','.join(values).decode('ascii') with LOG_HTTP_PARSE_LINKS(raw_link=value) as action: links = {} replace_chars = u' \'"' for val in re.split(u', *<', value): try: url, params = val.split(u';', 1) except ValueError: url, params = val, u'' link = {} link[u'url'] = url.strip(u'<> \'"') for param in params.split(u';'): try: key, value = param.split(u'=') except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links[link.get(u'rel') or link.get(u'url')] = link action.add_success_fields(parsed_links=links) return links
python
def _parse_header_links(response): """ Parse the links from a Link: header field. .. todo:: Links with the same relation collide at the moment. :param bytes value: The header value. :rtype: `dict` :return: A dictionary of parsed links, keyed by ``rel`` or ``url``. """ values = response.headers.getRawHeaders(b'link', [b'']) value = b','.join(values).decode('ascii') with LOG_HTTP_PARSE_LINKS(raw_link=value) as action: links = {} replace_chars = u' \'"' for val in re.split(u', *<', value): try: url, params = val.split(u';', 1) except ValueError: url, params = val, u'' link = {} link[u'url'] = url.strip(u'<> \'"') for param in params.split(u';'): try: key, value = param.split(u'=') except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links[link.get(u'rel') or link.get(u'url')] = link action.add_success_fields(parsed_links=links) return links
[ "def", "_parse_header_links", "(", "response", ")", ":", "values", "=", "response", ".", "headers", ".", "getRawHeaders", "(", "b'link'", ",", "[", "b''", "]", ")", "value", "=", "b','", ".", "join", "(", "values", ")", ".", "decode", "(", "'ascii'", ")", "with", "LOG_HTTP_PARSE_LINKS", "(", "raw_link", "=", "value", ")", "as", "action", ":", "links", "=", "{", "}", "replace_chars", "=", "u' \\'\"'", "for", "val", "in", "re", ".", "split", "(", "u', *<'", ",", "value", ")", ":", "try", ":", "url", ",", "params", "=", "val", ".", "split", "(", "u';'", ",", "1", ")", "except", "ValueError", ":", "url", ",", "params", "=", "val", ",", "u''", "link", "=", "{", "}", "link", "[", "u'url'", "]", "=", "url", ".", "strip", "(", "u'<> \\'\"'", ")", "for", "param", "in", "params", ".", "split", "(", "u';'", ")", ":", "try", ":", "key", ",", "value", "=", "param", ".", "split", "(", "u'='", ")", "except", "ValueError", ":", "break", "link", "[", "key", ".", "strip", "(", "replace_chars", ")", "]", "=", "value", ".", "strip", "(", "replace_chars", ")", "links", "[", "link", ".", "get", "(", "u'rel'", ")", "or", "link", ".", "get", "(", "u'url'", ")", "]", "=", "link", "action", ".", "add_success_fields", "(", "parsed_links", "=", "links", ")", "return", "links" ]
Parse the links from a Link: header field. .. todo:: Links with the same relation collide at the moment. :param bytes value: The header value. :rtype: `dict` :return: A dictionary of parsed links, keyed by ``rel`` or ``url``.
[ "Parse", "the", "links", "from", "a", "Link", ":", "header", "field", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L37-L69
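The header-splitting logic can be exercised on a raw string without Twisted; this sketch applies the same split and strip steps to a sample Link value (the URLs are made up).

import re

value = ('<https://example.invalid/acme/new-authz>;rel="next", '
         '<https://example.invalid/acme/terms>;rel="terms-of-service"')

links = {}
replace_chars = ' \'"'
for val in re.split(', *<', value):
    try:
        url, params = val.split(';', 1)
    except ValueError:
        url, params = val, ''
    link = {'url': url.strip('<> \'"')}
    for param in params.split(';'):
        try:
            key, v = param.split('=')
        except ValueError:
            break
        link[key.strip(replace_chars)] = v.strip(replace_chars)
    links[link.get('rel') or link.get('url')] = link

print(sorted(links))  # ['next', 'terms-of-service']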
twisted/txacme
src/txacme/client.py
_default_client
def _default_client(jws_client, reactor, key, alg): """ Make a client if we didn't get one. """ if jws_client is None: pool = HTTPConnectionPool(reactor) agent = Agent(reactor, pool=pool) jws_client = JWSClient(HTTPClient(agent=agent), key, alg) return jws_client
python
def _default_client(jws_client, reactor, key, alg): """ Make a client if we didn't get one. """ if jws_client is None: pool = HTTPConnectionPool(reactor) agent = Agent(reactor, pool=pool) jws_client = JWSClient(HTTPClient(agent=agent), key, alg) return jws_client
[ "def", "_default_client", "(", "jws_client", ",", "reactor", ",", "key", ",", "alg", ")", ":", "if", "jws_client", "is", "None", ":", "pool", "=", "HTTPConnectionPool", "(", "reactor", ")", "agent", "=", "Agent", "(", "reactor", ",", "pool", "=", "pool", ")", "jws_client", "=", "JWSClient", "(", "HTTPClient", "(", "agent", "=", "agent", ")", ",", "key", ",", "alg", ")", "return", "jws_client" ]
Make a client if we didn't get one.
[ "Make", "a", "client", "if", "we", "didn", "t", "get", "one", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L72-L80
twisted/txacme
src/txacme/client.py
_find_supported_challenge
def _find_supported_challenge(authzr, responders): """ Find a challenge combination that consists of a single challenge that the responder can satisfy. :param ~acme.messages.AuthorizationResource auth: The authorization to examine. :type responder: List[`~txacme.interfaces.IResponder`] :param responder: The possible responders to use. :raises NoSupportedChallenges: When a suitable challenge combination is not found. :rtype: Tuple[`~txacme.interfaces.IResponder`, `~acme.messages.ChallengeBody`] :return: The responder and challenge that were found. """ matches = [ (responder, challbs[0]) for challbs in authzr.body.resolved_combinations for responder in responders if [challb.typ for challb in challbs] == [responder.challenge_type]] if len(matches) == 0: raise NoSupportedChallenges(authzr) else: return matches[0]
python
def _find_supported_challenge(authzr, responders): """ Find a challenge combination that consists of a single challenge that the responder can satisfy. :param ~acme.messages.AuthorizationResource auth: The authorization to examine. :type responder: List[`~txacme.interfaces.IResponder`] :param responder: The possible responders to use. :raises NoSupportedChallenges: When a suitable challenge combination is not found. :rtype: Tuple[`~txacme.interfaces.IResponder`, `~acme.messages.ChallengeBody`] :return: The responder and challenge that were found. """ matches = [ (responder, challbs[0]) for challbs in authzr.body.resolved_combinations for responder in responders if [challb.typ for challb in challbs] == [responder.challenge_type]] if len(matches) == 0: raise NoSupportedChallenges(authzr) else: return matches[0]
[ "def", "_find_supported_challenge", "(", "authzr", ",", "responders", ")", ":", "matches", "=", "[", "(", "responder", ",", "challbs", "[", "0", "]", ")", "for", "challbs", "in", "authzr", ".", "body", ".", "resolved_combinations", "for", "responder", "in", "responders", "if", "[", "challb", ".", "typ", "for", "challb", "in", "challbs", "]", "==", "[", "responder", ".", "challenge_type", "]", "]", "if", "len", "(", "matches", ")", "==", "0", ":", "raise", "NoSupportedChallenges", "(", "authzr", ")", "else", ":", "return", "matches", "[", "0", "]" ]
Find a challenge combination that consists of a single challenge that the responder can satisfy. :param ~acme.messages.AuthorizationResource auth: The authorization to examine. :type responder: List[`~txacme.interfaces.IResponder`] :param responder: The possible responders to use. :raises NoSupportedChallenges: When a suitable challenge combination is not found. :rtype: Tuple[`~txacme.interfaces.IResponder`, `~acme.messages.ChallengeBody`] :return: The responder and challenge that were found.
[ "Find", "a", "challenge", "combination", "that", "consists", "of", "a", "single", "challenge", "that", "the", "responder", "can", "satisfy", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L505-L531
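The matching rule (take the first combination that is a single challenge whose type some responder handles) can be shown with plain data; the namedtuples below are stand-ins for txacme responders and acme challenge bodies.

from collections import namedtuple

Responder = namedtuple('Responder', 'challenge_type')
Challenge = namedtuple('Challenge', 'typ')

responders = [Responder('http-01'), Responder('dns-01')]
combinations = [
    [Challenge('tls-sni-01')],                    # unsupported type
    [Challenge('http-01'), Challenge('dns-01')],  # more than one challenge: skipped
    [Challenge('dns-01')],                        # single supported challenge
]

matches = [
    (responder, challbs[0])
    for challbs in combinations
    for responder in responders
    if [c.typ for c in challbs] == [responder.challenge_type]]

print(matches[0])  # (Responder(challenge_type='dns-01'), Challenge(typ='dns-01'))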
twisted/txacme
src/txacme/client.py
answer_challenge
def answer_challenge(authzr, client, responders): """ Complete an authorization using a responder. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param .Client client: The ACME client. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: A list of responders that can be used to complete the challenge with. :return: A deferred firing when the authorization is verified. """ responder, challb = _find_supported_challenge(authzr, responders) response = challb.response(client.key) def _stop_responding(): return maybeDeferred( responder.stop_responding, authzr.body.identifier.value, challb.chall, response) return ( maybeDeferred( responder.start_responding, authzr.body.identifier.value, challb.chall, response) .addCallback(lambda _: client.answer_challenge(challb, response)) .addCallback(lambda _: _stop_responding) )
python
def answer_challenge(authzr, client, responders): """ Complete an authorization using a responder. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param .Client client: The ACME client. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: A list of responders that can be used to complete the challenge with. :return: A deferred firing when the authorization is verified. """ responder, challb = _find_supported_challenge(authzr, responders) response = challb.response(client.key) def _stop_responding(): return maybeDeferred( responder.stop_responding, authzr.body.identifier.value, challb.chall, response) return ( maybeDeferred( responder.start_responding, authzr.body.identifier.value, challb.chall, response) .addCallback(lambda _: client.answer_challenge(challb, response)) .addCallback(lambda _: _stop_responding) )
[ "def", "answer_challenge", "(", "authzr", ",", "client", ",", "responders", ")", ":", "responder", ",", "challb", "=", "_find_supported_challenge", "(", "authzr", ",", "responders", ")", "response", "=", "challb", ".", "response", "(", "client", ".", "key", ")", "def", "_stop_responding", "(", ")", ":", "return", "maybeDeferred", "(", "responder", ".", "stop_responding", ",", "authzr", ".", "body", ".", "identifier", ".", "value", ",", "challb", ".", "chall", ",", "response", ")", "return", "(", "maybeDeferred", "(", "responder", ".", "start_responding", ",", "authzr", ".", "body", ".", "identifier", ".", "value", ",", "challb", ".", "chall", ",", "response", ")", ".", "addCallback", "(", "lambda", "_", ":", "client", ".", "answer_challenge", "(", "challb", ",", "response", ")", ")", ".", "addCallback", "(", "lambda", "_", ":", "_stop_responding", ")", ")" ]
Complete an authorization using a responder. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param .Client client: The ACME client. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: A list of responders that can be used to complete the challenge with. :return: A deferred firing when the authorization is verified.
[ "Complete", "an", "authorization", "using", "a", "responder", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L534-L565
twisted/txacme
src/txacme/client.py
poll_until_valid
def poll_until_valid(authzr, clock, client, timeout=300.0): """ Poll an authorization until it is in a state other than pending or processing. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param clock: The ``IReactorTime`` implementation to use; usually the reactor, when not testing. :param .Client client: The ACME client. :param float timeout: Maximum time to poll in seconds, before giving up. :raises txacme.client.AuthorizationFailed: if the authorization is no longer in the pending, processing, or valid states. :raises: ``twisted.internet.defer.CancelledError`` if the authorization was still in pending or processing state when the timeout was reached. :rtype: Deferred[`~acme.messages.AuthorizationResource`] :return: A deferred firing when the authorization has completed/failed; if the authorization is valid, the authorization resource will be returned. """ def repoll(result): authzr, retry_after = result if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}: return ( deferLater(clock, retry_after, lambda: None) .addCallback(lambda _: client.poll(authzr)) .addCallback(repoll) ) if authzr.body.status != STATUS_VALID: raise AuthorizationFailed(authzr) return authzr def cancel_timeout(result): if timeout_call.active(): timeout_call.cancel() return result d = client.poll(authzr).addCallback(repoll) timeout_call = clock.callLater(timeout, d.cancel) d.addBoth(cancel_timeout) return d
python
def poll_until_valid(authzr, clock, client, timeout=300.0): """ Poll an authorization until it is in a state other than pending or processing. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param clock: The ``IReactorTime`` implementation to use; usually the reactor, when not testing. :param .Client client: The ACME client. :param float timeout: Maximum time to poll in seconds, before giving up. :raises txacme.client.AuthorizationFailed: if the authorization is no longer in the pending, processing, or valid states. :raises: ``twisted.internet.defer.CancelledError`` if the authorization was still in pending or processing state when the timeout was reached. :rtype: Deferred[`~acme.messages.AuthorizationResource`] :return: A deferred firing when the authorization has completed/failed; if the authorization is valid, the authorization resource will be returned. """ def repoll(result): authzr, retry_after = result if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}: return ( deferLater(clock, retry_after, lambda: None) .addCallback(lambda _: client.poll(authzr)) .addCallback(repoll) ) if authzr.body.status != STATUS_VALID: raise AuthorizationFailed(authzr) return authzr def cancel_timeout(result): if timeout_call.active(): timeout_call.cancel() return result d = client.poll(authzr).addCallback(repoll) timeout_call = clock.callLater(timeout, d.cancel) d.addBoth(cancel_timeout) return d
[ "def", "poll_until_valid", "(", "authzr", ",", "clock", ",", "client", ",", "timeout", "=", "300.0", ")", ":", "def", "repoll", "(", "result", ")", ":", "authzr", ",", "retry_after", "=", "result", "if", "authzr", ".", "body", ".", "status", "in", "{", "STATUS_PENDING", ",", "STATUS_PROCESSING", "}", ":", "return", "(", "deferLater", "(", "clock", ",", "retry_after", ",", "lambda", ":", "None", ")", ".", "addCallback", "(", "lambda", "_", ":", "client", ".", "poll", "(", "authzr", ")", ")", ".", "addCallback", "(", "repoll", ")", ")", "if", "authzr", ".", "body", ".", "status", "!=", "STATUS_VALID", ":", "raise", "AuthorizationFailed", "(", "authzr", ")", "return", "authzr", "def", "cancel_timeout", "(", "result", ")", ":", "if", "timeout_call", ".", "active", "(", ")", ":", "timeout_call", ".", "cancel", "(", ")", "return", "result", "d", "=", "client", ".", "poll", "(", "authzr", ")", ".", "addCallback", "(", "repoll", ")", "timeout_call", "=", "clock", ".", "callLater", "(", "timeout", ",", "d", ".", "cancel", ")", "d", ".", "addBoth", "(", "cancel_timeout", ")", "return", "d" ]
Poll an authorization until it is in a state other than pending or processing. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param clock: The ``IReactorTime`` implementation to use; usually the reactor, when not testing. :param .Client client: The ACME client. :param float timeout: Maximum time to poll in seconds, before giving up. :raises txacme.client.AuthorizationFailed: if the authorization is no longer in the pending, processing, or valid states. :raises: ``twisted.internet.defer.CancelledError`` if the authorization was still in pending or processing state when the timeout was reached. :rtype: Deferred[`~acme.messages.AuthorizationResource`] :return: A deferred firing when the authorization has completed/failed; if the authorization is valid, the authorization resource will be returned.
[ "Poll", "an", "authorization", "until", "it", "is", "in", "a", "state", "other", "than", "pending", "or", "processing", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L568-L609
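The timeout handling in poll_until_valid is a reusable Twisted idiom: schedule d.cancel() with callLater and disarm the timer if the Deferred fires first. A minimal sketch using twisted.internet.task.Clock (so it runs without a real reactor) follows; with_timeout is a name invented here.

from twisted.internet import defer
from twisted.internet.task import Clock

def with_timeout(clock, d, timeout):
    # Same pattern as poll_until_valid: cancel `d` if it has not fired in time.
    timeout_call = clock.callLater(timeout, d.cancel)
    def cancel_timeout(result):
        if timeout_call.active():
            timeout_call.cancel()
        return result
    return d.addBoth(cancel_timeout)

clock = Clock()
d = with_timeout(clock, defer.Deferred(), timeout=300.0)
clock.advance(300.0)                   # simulated time passes without a result
d.addErrback(lambda f: print(f.type))  # twisted.internet.defer.CancelledError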
twisted/txacme
src/txacme/client.py
Client.from_url
def from_url(cls, reactor, url, key, alg=RS256, jws_client=None): """ Construct a client from an ACME directory at a given URL. :param url: The ``twisted.python.url.URL`` to fetch the directory from. See `txacme.urls` for constants for various well-known public directories. :param reactor: The Twisted reactor to use. :param ~josepy.jwk.JWK key: The client key to use. :param alg: The signing algorithm to use. Needs to be compatible with the type of key used. :param JWSClient jws_client: The underlying client to use, or ``None`` to construct one. :return: The constructed client. :rtype: Deferred[`Client`] """ action = LOG_ACME_CONSUME_DIRECTORY( url=url, key_type=key.typ, alg=alg.name) with action.context(): check_directory_url_type(url) jws_client = _default_client(jws_client, reactor, key, alg) return ( DeferredContext(jws_client.get(url.asText())) .addCallback(json_content) .addCallback(messages.Directory.from_json) .addCallback( tap(lambda d: action.add_success_fields(directory=d))) .addCallback(cls, reactor, key, jws_client) .addActionFinish())
python
def from_url(cls, reactor, url, key, alg=RS256, jws_client=None): """ Construct a client from an ACME directory at a given URL. :param url: The ``twisted.python.url.URL`` to fetch the directory from. See `txacme.urls` for constants for various well-known public directories. :param reactor: The Twisted reactor to use. :param ~josepy.jwk.JWK key: The client key to use. :param alg: The signing algorithm to use. Needs to be compatible with the type of key used. :param JWSClient jws_client: The underlying client to use, or ``None`` to construct one. :return: The constructed client. :rtype: Deferred[`Client`] """ action = LOG_ACME_CONSUME_DIRECTORY( url=url, key_type=key.typ, alg=alg.name) with action.context(): check_directory_url_type(url) jws_client = _default_client(jws_client, reactor, key, alg) return ( DeferredContext(jws_client.get(url.asText())) .addCallback(json_content) .addCallback(messages.Directory.from_json) .addCallback( tap(lambda d: action.add_success_fields(directory=d))) .addCallback(cls, reactor, key, jws_client) .addActionFinish())
[ "def", "from_url", "(", "cls", ",", "reactor", ",", "url", ",", "key", ",", "alg", "=", "RS256", ",", "jws_client", "=", "None", ")", ":", "action", "=", "LOG_ACME_CONSUME_DIRECTORY", "(", "url", "=", "url", ",", "key_type", "=", "key", ".", "typ", ",", "alg", "=", "alg", ".", "name", ")", "with", "action", ".", "context", "(", ")", ":", "check_directory_url_type", "(", "url", ")", "jws_client", "=", "_default_client", "(", "jws_client", ",", "reactor", ",", "key", ",", "alg", ")", "return", "(", "DeferredContext", "(", "jws_client", ".", "get", "(", "url", ".", "asText", "(", ")", ")", ")", ".", "addCallback", "(", "json_content", ")", ".", "addCallback", "(", "messages", ".", "Directory", ".", "from_json", ")", ".", "addCallback", "(", "tap", "(", "lambda", "d", ":", "action", ".", "add_success_fields", "(", "directory", "=", "d", ")", ")", ")", ".", "addCallback", "(", "cls", ",", "reactor", ",", "key", ",", "jws_client", ")", ".", "addActionFinish", "(", ")", ")" ]
Construct a client from an ACME directory at a given URL. :param url: The ``twisted.python.url.URL`` to fetch the directory from. See `txacme.urls` for constants for various well-known public directories. :param reactor: The Twisted reactor to use. :param ~josepy.jwk.JWK key: The client key to use. :param alg: The signing algorithm to use. Needs to be compatible with the type of key used. :param JWSClient jws_client: The underlying client to use, or ``None`` to construct one. :return: The constructed client. :rtype: Deferred[`Client`]
[ "Construct", "a", "client", "from", "an", "ACME", "directory", "at", "a", "given", "URL", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L109-L138
twisted/txacme
src/txacme/client.py
Client.register
def register(self, new_reg=None): """ Create a new registration with the ACME server. :param ~acme.messages.NewRegistration new_reg: The registration message to use, or ``None`` to construct one. :return: The registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] """ if new_reg is None: new_reg = messages.NewRegistration() action = LOG_ACME_REGISTER(registration=new_reg) with action.context(): return ( DeferredContext( self.update_registration( new_reg, uri=self.directory[new_reg])) .addErrback(self._maybe_registered, new_reg) .addCallback( tap(lambda r: action.add_success_fields(registration=r))) .addActionFinish())
python
def register(self, new_reg=None): """ Create a new registration with the ACME server. :param ~acme.messages.NewRegistration new_reg: The registration message to use, or ``None`` to construct one. :return: The registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`] """ if new_reg is None: new_reg = messages.NewRegistration() action = LOG_ACME_REGISTER(registration=new_reg) with action.context(): return ( DeferredContext( self.update_registration( new_reg, uri=self.directory[new_reg])) .addErrback(self._maybe_registered, new_reg) .addCallback( tap(lambda r: action.add_success_fields(registration=r))) .addActionFinish())
[ "def", "register", "(", "self", ",", "new_reg", "=", "None", ")", ":", "if", "new_reg", "is", "None", ":", "new_reg", "=", "messages", ".", "NewRegistration", "(", ")", "action", "=", "LOG_ACME_REGISTER", "(", "registration", "=", "new_reg", ")", "with", "action", ".", "context", "(", ")", ":", "return", "(", "DeferredContext", "(", "self", ".", "update_registration", "(", "new_reg", ",", "uri", "=", "self", ".", "directory", "[", "new_reg", "]", ")", ")", ".", "addErrback", "(", "self", ".", "_maybe_registered", ",", "new_reg", ")", ".", "addCallback", "(", "tap", "(", "lambda", "r", ":", "action", ".", "add_success_fields", "(", "registration", "=", "r", ")", ")", ")", ".", "addActionFinish", "(", ")", ")" ]
Create a new registration with the ACME server. :param ~acme.messages.NewRegistration new_reg: The registration message to use, or ``None`` to construct one. :return: The registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`]
[ "Create", "a", "new", "registration", "with", "the", "ACME", "server", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L140-L161
twisted/txacme
src/txacme/client.py
Client._maybe_location
def _maybe_location(cls, response, uri=None): """ Get the Location: if there is one. """ location = response.headers.getRawHeaders(b'location', [None])[0] if location is not None: return location.decode('ascii') return uri
python
def _maybe_location(cls, response, uri=None): """ Get the Location: if there is one. """ location = response.headers.getRawHeaders(b'location', [None])[0] if location is not None: return location.decode('ascii') return uri
[ "def", "_maybe_location", "(", "cls", ",", "response", ",", "uri", "=", "None", ")", ":", "location", "=", "response", ".", "headers", ".", "getRawHeaders", "(", "b'location'", ",", "[", "None", "]", ")", "[", "0", "]", "if", "location", "is", "not", "None", ":", "return", "location", ".", "decode", "(", "'ascii'", ")", "return", "uri" ]
Get the Location: if there is one.
[ "Get", "the", "Location", ":", "if", "there", "is", "one", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L164-L171
twisted/txacme
src/txacme/client.py
Client._maybe_registered
def _maybe_registered(self, failure, new_reg): """ If the registration already exists, we should just load it. """ failure.trap(ServerError) response = failure.value.response if response.code == http.CONFLICT: reg = new_reg.update( resource=messages.UpdateRegistration.resource_type) uri = self._maybe_location(response) return self.update_registration(reg, uri=uri) return failure
python
def _maybe_registered(self, failure, new_reg):
        """
        If the registration already exists, we should just load it.
        """
        failure.trap(ServerError)
        response = failure.value.response
        if response.code == http.CONFLICT:
            reg = new_reg.update(
                resource=messages.UpdateRegistration.resource_type)
            uri = self._maybe_location(response)
            return self.update_registration(reg, uri=uri)
        return failure
[ "def", "_maybe_registered", "(", "self", ",", "failure", ",", "new_reg", ")", ":", "failure", ".", "trap", "(", "ServerError", ")", "response", "=", "failure", ".", "value", ".", "response", "if", "response", ".", "code", "==", "http", ".", "CONFLICT", ":", "reg", "=", "new_reg", ".", "update", "(", "resource", "=", "messages", ".", "UpdateRegistration", ".", "resource_type", ")", "uri", "=", "self", ".", "_maybe_location", "(", "response", ")", "return", "self", ".", "update_registration", "(", "reg", ",", "uri", "=", "uri", ")", "return", "failure" ]
If the registration already exists, we should just load it.
[ "If", "the", "registration", "already", "exists", "we", "should", "just", "load", "it", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L173-L184
twisted/txacme
src/txacme/client.py
Client.agree_to_tos
def agree_to_tos(self, regr):
        """
        Accept the terms-of-service for a registration.

        :param ~acme.messages.RegistrationResource regr: The registration to
            update.

        :return: The updated registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
        """
        return self.update_registration(
            regr.update(
                body=regr.body.update(
                    agreement=regr.terms_of_service)))
python
def agree_to_tos(self, regr):
        """
        Accept the terms-of-service for a registration.

        :param ~acme.messages.RegistrationResource regr: The registration to
            update.

        :return: The updated registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
        """
        return self.update_registration(
            regr.update(
                body=regr.body.update(
                    agreement=regr.terms_of_service)))
[ "def", "agree_to_tos", "(", "self", ",", "regr", ")", ":", "return", "self", ".", "update_registration", "(", "regr", ".", "update", "(", "body", "=", "regr", ".", "body", ".", "update", "(", "agreement", "=", "regr", ".", "terms_of_service", ")", ")", ")" ]
Accept the terms-of-service for a registration.

        :param ~acme.messages.RegistrationResource regr: The registration to
            update.

        :return: The updated registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
[ "Accept", "the", "terms", "-", "of", "-", "service", "for", "a", "registration", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L186-L199
twisted/txacme
src/txacme/client.py
Client.update_registration
def update_registration(self, regr, uri=None):
        """
        Submit a registration to the server to update it.

        :param ~acme.messages.RegistrationResource regr: The registration to
            update. Can be a :class:`~acme.messages.NewRegistration` instead,
            in order to create a new registration.
        :param str uri: The url to submit to. Must be specified if a
            :class:`~acme.messages.NewRegistration` is provided.

        :return: The updated registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
        """
        if uri is None:
            uri = regr.uri
        if isinstance(regr, messages.RegistrationResource):
            message = messages.UpdateRegistration(**dict(regr.body))
        else:
            message = regr
        action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
        with action.context():
            return (
                DeferredContext(self._client.post(uri, message))
                .addCallback(self._parse_regr_response, uri=uri)
                .addCallback(self._check_regr, regr)
                .addCallback(
                    tap(lambda r: action.add_success_fields(registration=r)))
                .addActionFinish())
python
def update_registration(self, regr, uri=None):
        """
        Submit a registration to the server to update it.

        :param ~acme.messages.RegistrationResource regr: The registration to
            update. Can be a :class:`~acme.messages.NewRegistration` instead,
            in order to create a new registration.
        :param str uri: The url to submit to. Must be specified if a
            :class:`~acme.messages.NewRegistration` is provided.

        :return: The updated registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
        """
        if uri is None:
            uri = regr.uri
        if isinstance(regr, messages.RegistrationResource):
            message = messages.UpdateRegistration(**dict(regr.body))
        else:
            message = regr
        action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
        with action.context():
            return (
                DeferredContext(self._client.post(uri, message))
                .addCallback(self._parse_regr_response, uri=uri)
                .addCallback(self._check_regr, regr)
                .addCallback(
                    tap(lambda r: action.add_success_fields(registration=r)))
                .addActionFinish())
[ "def", "update_registration", "(", "self", ",", "regr", ",", "uri", "=", "None", ")", ":", "if", "uri", "is", "None", ":", "uri", "=", "regr", ".", "uri", "if", "isinstance", "(", "regr", ",", "messages", ".", "RegistrationResource", ")", ":", "message", "=", "messages", ".", "UpdateRegistration", "(", "*", "*", "dict", "(", "regr", ".", "body", ")", ")", "else", ":", "message", "=", "regr", "action", "=", "LOG_ACME_UPDATE_REGISTRATION", "(", "uri", "=", "uri", ",", "registration", "=", "message", ")", "with", "action", ".", "context", "(", ")", ":", "return", "(", "DeferredContext", "(", "self", ".", "_client", ".", "post", "(", "uri", ",", "message", ")", ")", ".", "addCallback", "(", "self", ".", "_parse_regr_response", ",", "uri", "=", "uri", ")", ".", "addCallback", "(", "self", ".", "_check_regr", ",", "regr", ")", ".", "addCallback", "(", "tap", "(", "lambda", "r", ":", "action", ".", "add_success_fields", "(", "registration", "=", "r", ")", ")", ")", ".", "addActionFinish", "(", ")", ")" ]
Submit a registration to the server to update it.

        :param ~acme.messages.RegistrationResource regr: The registration to
            update. Can be a :class:`~acme.messages.NewRegistration` instead,
            in order to create a new registration.
        :param str uri: The url to submit to. Must be specified if a
            :class:`~acme.messages.NewRegistration` is provided.

        :return: The updated registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
[ "Submit", "a", "registration", "to", "the", "server", "to", "update", "it", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L201-L228
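A hypothetical sketch of calling the update_registration method documented above to add contact details to an existing account. The client and regr objects are assumed to come from an earlier register()/agree_to_tos() step, and the mailto address is a placeholder.
# Illustrative sketch only: add a contact address to an existing registration.
def update_contact(client, regr):
    # regr is a messages.RegistrationResource; update() returns a new,
    # immutable copy with the changed fields.
    updated = regr.update(
        body=regr.body.update(contact=(u'mailto:admin@example.com',)))
    # Returns a Deferred firing with the refreshed RegistrationResource.
    return client.update_registration(updated)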
twisted/txacme
src/txacme/client.py
Client._parse_regr_response
def _parse_regr_response(self, response, uri=None, new_authzr_uri=None,
                             terms_of_service=None):
        """
        Parse a registration response from the server.
        """
        links = _parse_header_links(response)
        if u'terms-of-service' in links:
            terms_of_service = links[u'terms-of-service'][u'url']
        if u'next' in links:
            new_authzr_uri = links[u'next'][u'url']
        if new_authzr_uri is None:
            raise errors.ClientError('"next" link missing')
        return (
            response.json()
            .addCallback(
                lambda body: messages.RegistrationResource(
                    body=messages.Registration.from_json(body),
                    uri=self._maybe_location(response, uri=uri),
                    new_authzr_uri=new_authzr_uri,
                    terms_of_service=terms_of_service))
            )
python
def _parse_regr_response(self, response, uri=None, new_authzr_uri=None,
                             terms_of_service=None):
        """
        Parse a registration response from the server.
        """
        links = _parse_header_links(response)
        if u'terms-of-service' in links:
            terms_of_service = links[u'terms-of-service'][u'url']
        if u'next' in links:
            new_authzr_uri = links[u'next'][u'url']
        if new_authzr_uri is None:
            raise errors.ClientError('"next" link missing')
        return (
            response.json()
            .addCallback(
                lambda body: messages.RegistrationResource(
                    body=messages.Registration.from_json(body),
                    uri=self._maybe_location(response, uri=uri),
                    new_authzr_uri=new_authzr_uri,
                    terms_of_service=terms_of_service))
            )
[ "def", "_parse_regr_response", "(", "self", ",", "response", ",", "uri", "=", "None", ",", "new_authzr_uri", "=", "None", ",", "terms_of_service", "=", "None", ")", ":", "links", "=", "_parse_header_links", "(", "response", ")", "if", "u'terms-of-service'", "in", "links", ":", "terms_of_service", "=", "links", "[", "u'terms-of-service'", "]", "[", "u'url'", "]", "if", "u'next'", "in", "links", ":", "new_authzr_uri", "=", "links", "[", "u'next'", "]", "[", "u'url'", "]", "if", "new_authzr_uri", "is", "None", ":", "raise", "errors", ".", "ClientError", "(", "'\"next\" link missing'", ")", "return", "(", "response", ".", "json", "(", ")", ".", "addCallback", "(", "lambda", "body", ":", "messages", ".", "RegistrationResource", "(", "body", "=", "messages", ".", "Registration", ".", "from_json", "(", "body", ")", ",", "uri", "=", "self", ".", "_maybe_location", "(", "response", ",", "uri", "=", "uri", ")", ",", "new_authzr_uri", "=", "new_authzr_uri", ",", "terms_of_service", "=", "terms_of_service", ")", ")", ")" ]
Parse a registration response from the server.
[ "Parse", "a", "registration", "response", "from", "the", "server", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L230-L251
twisted/txacme
src/txacme/client.py
Client._check_regr
def _check_regr(self, regr, new_reg):
        """
        Check that a registration response contains the registration we were
        expecting.
        """
        body = getattr(new_reg, 'body', new_reg)
        for k, v in body.items():
            if k == 'resource' or not v:
                continue
            if regr.body[k] != v:
                raise errors.UnexpectedUpdate(regr)
        if regr.body.key != self.key.public_key():
            raise errors.UnexpectedUpdate(regr)
        return regr
python
def _check_regr(self, regr, new_reg):
        """
        Check that a registration response contains the registration we were
        expecting.
        """
        body = getattr(new_reg, 'body', new_reg)
        for k, v in body.items():
            if k == 'resource' or not v:
                continue
            if regr.body[k] != v:
                raise errors.UnexpectedUpdate(regr)
        if regr.body.key != self.key.public_key():
            raise errors.UnexpectedUpdate(regr)
        return regr
[ "def", "_check_regr", "(", "self", ",", "regr", ",", "new_reg", ")", ":", "body", "=", "getattr", "(", "new_reg", ",", "'body'", ",", "new_reg", ")", "for", "k", ",", "v", "in", "body", ".", "items", "(", ")", ":", "if", "k", "==", "'resource'", "or", "not", "v", ":", "continue", "if", "regr", ".", "body", "[", "k", "]", "!=", "v", ":", "raise", "errors", ".", "UnexpectedUpdate", "(", "regr", ")", "if", "regr", ".", "body", ".", "key", "!=", "self", ".", "key", ".", "public_key", "(", ")", ":", "raise", "errors", ".", "UnexpectedUpdate", "(", "regr", ")", "return", "regr" ]
Check that a registration response contains the registration we were expecting.
[ "Check", "that", "a", "registration", "response", "contains", "the", "registration", "we", "were", "expecting", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L253-L266
twisted/txacme
src/txacme/client.py
Client.request_challenges
def request_challenges(self, identifier):
        """
        Create a new authorization.

        :param ~acme.messages.Identifier identifier: The identifier to
            authorize.

        :return: The new authorization resource.
        :rtype: Deferred[`~acme.messages.AuthorizationResource`]
        """
        action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
        with action.context():
            message = messages.NewAuthorization(identifier=identifier)
            return (
                DeferredContext(
                    self._client.post(self.directory[message], message))
                .addCallback(self._expect_response, http.CREATED)
                .addCallback(self._parse_authorization)
                .addCallback(self._check_authorization, identifier)
                .addCallback(
                    tap(lambda a: action.add_success_fields(authorization=a)))
                .addActionFinish())
python
def request_challenges(self, identifier):
        """
        Create a new authorization.

        :param ~acme.messages.Identifier identifier: The identifier to
            authorize.

        :return: The new authorization resource.
        :rtype: Deferred[`~acme.messages.AuthorizationResource`]
        """
        action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
        with action.context():
            message = messages.NewAuthorization(identifier=identifier)
            return (
                DeferredContext(
                    self._client.post(self.directory[message], message))
                .addCallback(self._expect_response, http.CREATED)
                .addCallback(self._parse_authorization)
                .addCallback(self._check_authorization, identifier)
                .addCallback(
                    tap(lambda a: action.add_success_fields(authorization=a)))
                .addActionFinish())
[ "def", "request_challenges", "(", "self", ",", "identifier", ")", ":", "action", "=", "LOG_ACME_CREATE_AUTHORIZATION", "(", "identifier", "=", "identifier", ")", "with", "action", ".", "context", "(", ")", ":", "message", "=", "messages", ".", "NewAuthorization", "(", "identifier", "=", "identifier", ")", "return", "(", "DeferredContext", "(", "self", ".", "_client", ".", "post", "(", "self", ".", "directory", "[", "message", "]", ",", "message", ")", ")", ".", "addCallback", "(", "self", ".", "_expect_response", ",", "http", ".", "CREATED", ")", ".", "addCallback", "(", "self", ".", "_parse_authorization", ")", ".", "addCallback", "(", "self", ".", "_check_authorization", ",", "identifier", ")", ".", "addCallback", "(", "tap", "(", "lambda", "a", ":", "action", ".", "add_success_fields", "(", "authorization", "=", "a", ")", ")", ")", ".", "addActionFinish", "(", ")", ")" ]
Create a new authorization.

        :param ~acme.messages.Identifier identifier: The identifier to
            authorize.

        :return: The new authorization resource.
        :rtype: Deferred[`~acme.messages.AuthorizationResource`]
[ "Create", "a", "new", "authorization", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L268-L289
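A hypothetical sketch of invoking the request_challenges method documented above for a DNS name. Identifier and IDENTIFIER_FQDN come from the acme.messages module the client already depends on; the client instance and example.com domain are placeholders.
# Illustrative sketch only: request challenges for a domain name.
from acme import messages


def authorize_domain(client, domain=u'example.com'):
    identifier = messages.Identifier(
        typ=messages.IDENTIFIER_FQDN, value=domain)
    # Fires with an AuthorizationResource listing the offered challenges.
    d = client.request_challenges(identifier)
    d.addCallback(lambda authzr: [c.chall for c in authzr.body.challenges])
    return d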
twisted/txacme
src/txacme/client.py
Client._expect_response
def _expect_response(cls, response, code):
        """
        Ensure we got the expected response code.
        """
        if response.code != code:
            raise errors.ClientError(
                'Expected {!r} response but got {!r}'.format(
                    code, response.code))
        return response
python
def _expect_response(cls, response, code):
        """
        Ensure we got the expected response code.
        """
        if response.code != code:
            raise errors.ClientError(
                'Expected {!r} response but got {!r}'.format(
                    code, response.code))
        return response
[ "def", "_expect_response", "(", "cls", ",", "response", ",", "code", ")", ":", "if", "response", ".", "code", "!=", "code", ":", "raise", "errors", ".", "ClientError", "(", "'Expected {!r} response but got {!r}'", ".", "format", "(", "code", ",", "response", ".", "code", ")", ")", "return", "response" ]
Ensure we got the expected response code.
[ "Ensure", "we", "got", "the", "expected", "response", "code", "." ]
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L292-L300